From 30cb25584b55c686c905ec6c2fe4205ed00aacd9 Mon Sep 17 00:00:00 2001
From: Yee <2520865+yixinglu@users.noreply.github.com>
Date: Thu, 10 Dec 2020 17:10:23 +0800
Subject: [PATCH] Merge 2.0 branch to master (#102)

* nebula-importer supports nebula-graph v2-alpha-preview (#98)
* feat: use nebula-go/v2 as nebula-graph client mod.
* feat: wrap the VID value in double quotes in the INSERT VERTEX statement.
* feat: update metad, storaged, graphd to v2 version in docker-compose.yaml.
* feat: quote the vid value.
* feat: use vesoft/nebula-console:v2-preview-nightly as importer docker image.
* feat: update ci/bootstrap.sh because nebula-console:v2-preview-nightly is based on alpine.
* feat: remove uuid function, and use "(string)hash" instead of hash as vid value.
* feat: use ubuntu-18.04 as github ci server.
* feat: increase afterPeriod wait seconds in example.yaml.
* feat: log the importer init procedures.
* feat: add beforePeriod to wait for postStart commands completion.
* feat: use ubuntu-latest as github ci server.
* feat: revert to previous afterPeriod.
* feat: remove afterPeriod logger info.
* style: just to re-trigger upstream PR CI.

* Remove vendors (#101)
* Remove vendor
* Add vendorbuild target
* Ignore vendor
* Continue cleanup

* Support nebula 2.0 data import (#99)
* Use nebula go client 2.0
* Try to fix importer tests
* Fix vid type error
* Add more tests
* Rename runner file
* Fix crash
* Fix CI
* Cleanup
* Cleanup vendors
* Restore 1.0 test csv data
* Fix logger

Co-authored-by: laishzh
---
 .gitignore | 5 +-
 Makefile | 9
 ci/bootstrap.sh | 20 +-
 docker-compose.yaml | 50 +-
 examples/example.yaml | 94 +-
 examples/{ => v1}/choose.csv | 0
 examples/{ => v1}/course-with-header.csv | 0
 examples/{ => v1}/course.csv | 0
 examples/{ => v1}/follow-delimiter.csv | 0
 examples/{ => v1}/follow-with-header.csv | 0
 .../follow-with-label-and-str-vid.csv | 0
 examples/{ => v1}/follow-with-label.csv | 0
 examples/{ => v1}/follow.csv | 0
 .../student-with-label-and-str-vid.csv | 0
 examples/{ => v1}/student.csv | 0
 examples/v2/choose.csv | 4 +
 examples/v2/course-with-header.csv | 5 +
 examples/v2/course.csv | 3 +
 examples/v2/follow-delimiter.csv | 4 +
 examples/v2/follow-with-header.csv | 4 +
 examples/v2/follow-with-label-and-str-vid.csv | 5 +
 examples/v2/follow-with-label.csv | 5 +
 examples/v2/follow.csv | 4 +
 .../v2/student-with-label-and-str-vid.csv | 4 +
 examples/v2/student.csv | 3 +
 go.mod | 2 +-
 go.sum | 14 +-
 pkg/client/client.go | 20 -
 pkg/client/clientpool.go | 68 +-
 pkg/cmd/{cmd.go => runner.go} | 0
 pkg/config/config.go | 100 +-
 pkg/logger/adapter.go | 23 +
 pkg/logger/logger.go | 78 +-
 vendor/github.com/facebook/fbthrift/LICENSE | 201 --
 .../fbthrift/thrift/lib/go/thrift/README.md | 4 -
 .../lib/go/thrift/application_exception.go | 162 -
 .../thrift/lib/go/thrift/binary_protocol.go | 505 ---
 .../lib/go/thrift/buffered_transport.go | 92 -
 .../thrift/lib/go/thrift/client_interface.go | 24 -
 .../thrift/lib/go/thrift/clientconn.go | 140 -
 .../thrift/lib/go/thrift/compact_protocol.go | 859 ------
 .../thrift/lib/go/thrift/concurrent_server.go | 162 -
 .../fbthrift/thrift/lib/go/thrift/context.go | 81 -
 .../thrift/lib/go/thrift/debug_protocol.go | 280 --
 .../thrift/lib/go/thrift/deserializer.go | 59 -
 .../thrift/lib/go/thrift/exception.go | 45 -
 .../fbthrift/thrift/lib/go/thrift/field.go | 80 -
 .../thrift/lib/go/thrift/framed_transport.go | 167 -
 .../fbthrift/thrift/lib/go/thrift/header.go | 698 -----
 .../thrift/lib/go/thrift/header_protocol.go | 190 --
 .../thrift/lib/go/thrift/header_transport.go | 460
--- .../thrift/lib/go/thrift/http_client.go | 257 -- .../thrift/lib/go/thrift/http_transport.go | 61 - .../thrift/lib/go/thrift/interceptor.go | 110 - .../lib/go/thrift/iostream_transport.go | 213 -- .../thrift/lib/go/thrift/json_protocol.go | 597 ---- .../thrift/lib/go/thrift/memory_buffer.go | 80 - .../thrift/lib/go/thrift/messagetype.go | 32 - .../lib/go/thrift/multiplexed_protocol.go | 164 - .../fbthrift/thrift/lib/go/thrift/numeric.go | 173 -- .../thrift/lib/go/thrift/pointerize.go | 52 - .../thrift/lib/go/thrift/processor.go | 197 -- .../thrift/lib/go/thrift/processor_factory.go | 91 - .../fbthrift/thrift/lib/go/thrift/protocol.go | 211 -- .../lib/go/thrift/protocol_exception.go | 80 - .../thrift/lib/go/thrift/protocol_factory.go | 26 - .../thrift/lib/go/thrift/rich_transport.go | 69 - .../thrift/lib/go/thrift/serializer.go | 84 - .../fbthrift/thrift/lib/go/thrift/server.go | 41 - .../thrift/lib/go/thrift/server_options.go | 109 - .../thrift/lib/go/thrift/server_socket.go | 123 - .../thrift/lib/go/thrift/server_transport.go | 35 - .../lib/go/thrift/simple_json_protocol.go | 1394 --------- .../thrift/lib/go/thrift/simple_server.go | 295 -- .../fbthrift/thrift/lib/go/thrift/socket.go | 192 -- .../thrift/lib/go/thrift/ssl_server_socket.go | 110 - .../thrift/lib/go/thrift/ssl_socket.go | 170 -- .../thrift/lib/go/thrift/transport.go | 72 - .../lib/go/thrift/transport_exception.go | 102 - .../thrift/lib/go/thrift/transport_factory.go | 41 - .../fbthrift/thrift/lib/go/thrift/type.go | 70 - .../thrift/lib/go/thrift/zlib_transport.go | 116 - .../fbthrift/thrift/lib/go/thrift/zstd.go | 33 - .../vesoft-inc/nebula-go/.editorconfig | 12 - .../vesoft-inc/nebula-go/.gitignore | 16 - .../github.com/vesoft-inc/nebula-go/README.md | 68 - .../github.com/vesoft-inc/nebula-go/client.go | 108 - vendor/github.com/vesoft-inc/nebula-go/go.mod | 5 - vendor/github.com/vesoft-inc/nebula-go/go.sum | 4 - .../vesoft-inc/nebula-go/nebula/constants.go | 28 - .../nebula-go/nebula/graph/constants.go | 26 - .../nebula-go/nebula/graph/graphservice.go | 1145 ------- .../nebula-go/nebula/graph/ttypes.go | 2373 --------------- .../vesoft-inc/nebula-go/nebula/ttypes.go | 1804 ----------- vendor/gopkg.in/yaml.v2/.travis.yml | 12 - vendor/gopkg.in/yaml.v2/LICENSE | 201 -- vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 - vendor/gopkg.in/yaml.v2/NOTICE | 13 - vendor/gopkg.in/yaml.v2/README.md | 133 - vendor/gopkg.in/yaml.v2/apic.go | 739 ----- vendor/gopkg.in/yaml.v2/decode.go | 813 ----- vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ---------- vendor/gopkg.in/yaml.v2/encode.go | 390 --- vendor/gopkg.in/yaml.v2/go.mod | 5 - vendor/gopkg.in/yaml.v2/parserc.go | 1095 ------- vendor/gopkg.in/yaml.v2/readerc.go | 412 --- vendor/gopkg.in/yaml.v2/resolve.go | 258 -- vendor/gopkg.in/yaml.v2/scannerc.go | 2712 ----------------- vendor/gopkg.in/yaml.v2/sorter.go | 113 - vendor/gopkg.in/yaml.v2/writerc.go | 26 - vendor/gopkg.in/yaml.v2/yaml.go | 466 --- vendor/gopkg.in/yaml.v2/yamlh.go | 738 ----- vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 -- vendor/modules.txt | 8 - 114 files changed, 307 insertions(+), 25438 deletions(-) rename examples/{ => v1}/choose.csv (100%) rename examples/{ => v1}/course-with-header.csv (100%) rename examples/{ => v1}/course.csv (100%) rename examples/{ => v1}/follow-delimiter.csv (100%) rename examples/{ => v1}/follow-with-header.csv (100%) rename examples/{ => v1}/follow-with-label-and-str-vid.csv (100%) rename examples/{ => v1}/follow-with-label.csv (100%) rename examples/{ => v1}/follow.csv (100%) rename 
examples/{ => v1}/student-with-label-and-str-vid.csv (100%) rename examples/{ => v1}/student.csv (100%) create mode 100644 examples/v2/choose.csv create mode 100644 examples/v2/course-with-header.csv create mode 100644 examples/v2/course.csv create mode 100644 examples/v2/follow-delimiter.csv create mode 100644 examples/v2/follow-with-header.csv create mode 100644 examples/v2/follow-with-label-and-str-vid.csv create mode 100644 examples/v2/follow-with-label.csv create mode 100644 examples/v2/follow.csv create mode 100644 examples/v2/student-with-label-and-str-vid.csv create mode 100644 examples/v2/student.csv delete mode 100644 pkg/client/client.go rename pkg/cmd/{cmd.go => runner.go} (100%) create mode 100644 pkg/logger/adapter.go delete mode 100644 vendor/github.com/facebook/fbthrift/LICENSE delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/README.md delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/application_exception.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/binary_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/buffered_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/client_interface.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/clientconn.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/compact_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/concurrent_server.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/context.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/debug_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/deserializer.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/exception.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/field.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/framed_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_client.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/interceptor.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/iostream_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/json_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/memory_buffer.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/messagetype.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/multiplexed_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/numeric.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/pointerize.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor_factory.go delete mode 100644 
vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_exception.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_factory.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/rich_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/serializer.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_options.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_socket.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_json_protocol.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_server.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/socket.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_server_socket.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_socket.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_exception.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_factory.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/type.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zlib_transport.go delete mode 100644 vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zstd.go delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/.editorconfig delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/.gitignore delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/README.md delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/client.go delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/go.mod delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/go.sum delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/nebula/constants.go delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/nebula/graph/constants.go delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/nebula/graph/graphservice.go delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/nebula/graph/ttypes.go delete mode 100644 vendor/github.com/vesoft-inc/nebula-go/nebula/ttypes.go delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml delete mode 100644 vendor/gopkg.in/yaml.v2/NOTICE delete mode 100644 vendor/gopkg.in/yaml.v2/README.md delete mode 100644 vendor/gopkg.in/yaml.v2/apic.go delete mode 100644 vendor/gopkg.in/yaml.v2/decode.go delete mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 vendor/gopkg.in/yaml.v2/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v2/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/yaml.go delete 
mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go delete mode 100644 vendor/modules.txt diff --git a/.gitignore b/.gitignore index e07503c3..c39fd8a9 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,6 @@ *.dll *.so *.dylib -nebula-importer # Test binary, build with `go test -c` *.test @@ -15,4 +14,6 @@ nebula-importer # macOS .DS_Store -examples/err/ +err/ +vendor/ +nebula-importer diff --git a/Makefile b/Makefile index e5a11628..48cf617f 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,15 @@ default: build build: clean fmt + @cd cmd; \ + go build -o nebula-importer; \ + mv nebula-importer ..; + @echo "nebula-importer has been outputed to $$(pwd)/nebula-importer"; + +vendor: clean fmt + @cd cmd; go mod vendor + +vendorbuild: vendor @cd cmd; \ go build -mod vendor -o nebula-importer; \ mv nebula-importer ..; diff --git a/ci/bootstrap.sh b/ci/bootstrap.sh index 3937b417..0ee960ea 100755 --- a/ci/bootstrap.sh +++ b/ci/bootstrap.sh @@ -5,19 +5,21 @@ set -e addr=$1 port=$2 -curl -fsSL https://studygolang.com/dl/golang/go1.13.4.linux-amd64.tar.gz -o go1.13.4.linux-amd64.tar.gz -tar zxf go1.13.4.linux-amd64.tar.gz -C /usr/local/ - -export GOROOT=/usr/local/go export GOPATH=/usr/local/nebula/ -export PATH=$PATH:$GOROOT/bin:$GOPATH/bin export GO111MODULE=on -pushd ./importer/cmd -go build -mod vendor -o ../../nebula-importer -popd +# build nebula-console +wget "https://github.com/vesoft-inc/nebula-console/archive/master.zip" -O nebula-console.zip +unzip nebula-console.zip -d . +mv nebula-console-* nebula-console +cd nebula-console +go build -o nebula-console + +cd /usr/local/nebula/importer/cmd +go build -o ../../nebula-importer +cd /usr/local/nebula -until echo "quit" | ./bin/nebula -u user -p password --addr=$addr --port=$port &> /dev/null; do +until echo "quit" | /usr/local/nebula/nebula-console/nebula-console -u user -p password --addr=$addr --port=$port &> /dev/null; do echo "nebula graph is unavailable - sleeping" sleep 2 done diff --git a/docker-compose.yaml b/docker-compose.yaml index 1cf82c10..997772f6 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,19 +1,19 @@ version: '3.4' services: metad: - image: vesoft/nebula-metad:nightly + image: vesoft/nebula-metad:v2-preview-nightly environment: USER: root command: - - --meta_server_addrs=172.29.1.1:45500 - - --local_ip=172.29.1.1 - - --ws_ip=172.29.1.1 + - --meta_server_addrs=metad:45500 + - --local_ip=metad + - --ws_ip=metad - --port=45500 volumes: - metadata:/usr/local/nebula/data/meta - logsdata:/usr/local/nebula/logs healthcheck: - test: ["CMD", "curl", "-f", "http://172.29.1.1:11000/status"] + test: ["CMD", "curl", "-f", "http://metad:11000/status"] interval: 15s timeout: 10s retries: 3 @@ -23,17 +23,16 @@ services: - 11002 networks: nebula-net: - ipv4_address: 172.29.1.1 restart: on-failure storaged: - image: vesoft/nebula-storaged:nightly + image: vesoft/nebula-storaged:v2-preview-nightly environment: USER: root command: - - --meta_server_addrs=172.29.1.1:45500 - - --local_ip=172.29.2.1 - - --ws_ip=172.29.2.1 + - --meta_server_addrs=metad:45500 + - --local_ip=storaged + - --ws_ip=storaged - --port=44500 volumes: - storagedata:/usr/local/nebula/data/storage @@ -41,7 +40,7 @@ services: depends_on: - metad healthcheck: - test: ["CMD", "curl", "-f", "http://172.29.2.1:12000/status"] + test: ["CMD", "curl", "-f", "http://storaged:12000/status"] interval: 15s timeout: 10s retries: 3 @@ -51,24 +50,23 @@ services: - 12002 networks: nebula-net: - ipv4_address: 172.29.2.1 
restart: on-failure graphd1: - image: vesoft/nebula-graphd:nightly + image: vesoft/nebula-graphd:v2-preview-nightly environment: USER: root command: - - --meta_server_addrs=172.29.1.1:45500 + - --meta_server_addrs=metad:45500 - --port=3699 - - --ws_ip=172.29.3.1 + - --ws_ip=graphd1 - --log_dir=/usr/local/nebula/logs1 volumes: - logsdata:/usr/local/nebula/logs1 depends_on: - metad healthcheck: - test: ["CMD", "curl", "-f", "http://172.29.3.1:13000/status"] + test: ["CMD", "curl", "-f", "http://graphd1:13000/status"] interval: 30s timeout: 10s retries: 3 @@ -79,17 +77,16 @@ services: - 13002 networks: nebula-net: - ipv4_address: 172.29.3.1 restart: on-failure graphd2: - image: vesoft/nebula-graphd:nightly + image: vesoft/nebula-graphd:v2-preview-nightly environment: USER: root command: - - --meta_server_addrs=172.29.1.1:45500 + - --meta_server_addrs=metad:45500 - --port=3699 - - --ws_ip=172.29.3.2 + - --ws_ip=graphd2 - --log_dir=/usr/local/nebula/logs2 - --enable_authorize=true volumes: @@ -97,7 +94,7 @@ services: depends_on: - metad healthcheck: - test: ["CMD", "curl", "-f", "http://172.29.3.2:13000/status"] + test: ["CMD", "curl", "-f", "http://graphd2:13000/status"] interval: 30s timeout: 10s retries: 3 @@ -108,30 +105,25 @@ services: - 13002 networks: nebula-net: - ipv4_address: 172.29.3.2 restart: on-failure importer: - image: vesoft/nebula-console:nightly + image: golang:alpine volumes: - .:/usr/local/nebula/importer:rw + working_dir: /usr/local/nebula entrypoint: - ./importer/ci/bootstrap.sh - - "172.29.3.1" + - "graphd1" - "3699" depends_on: - graphd1 - graphd2 networks: nebula-net: - ipv4_address: 172.29.4.1 networks: nebula-net: - ipam: - driver: default - config: - - subnet: 172.29.0.0/16 volumes: storagedata: diff --git a/examples/example.yaml b/examples/example.yaml index e52d1b15..d06a863b 100644 --- a/examples/example.yaml +++ b/examples/example.yaml @@ -5,18 +5,18 @@ clientSettings: retry: 3 concurrency: 2 # number of graph clients channelBufferSize: 1 - space: test + space: importer_test connection: user: root password: nebula - address: 172.29.3.1:3699,172.29.3.2:3699 + address: graphd1:3699,graphd2:3699 postStart: commands: | UPDATE CONFIGS storage:wal_ttl=3600; UPDATE CONFIGS storage:rocksdb_column_family_options = { disable_auto_compactions = true }; - DROP SPACE IF EXISTS test; - CREATE SPACE IF NOT EXISTS test(partition_num=5, replica_factor=1); - USE test; + DROP SPACE IF EXISTS importer_test; + CREATE SPACE IF NOT EXISTS importer_test(partition_num=5, replica_factor=1, vid_type=FIXED_STRING(10)); + USE importer_test; CREATE TAG course(name string, credits int); CREATE TAG building(name string); CREATE TAG student(name string, age int, gender string); @@ -32,7 +32,7 @@ clientSettings: UPDATE CONFIGS storage:wal_ttl=86400; logPath: ./err/test.log files: - - path: ./choose.csv + - path: ./v2/choose.csv batchSize: 2 inOrder: false type: csv @@ -48,8 +48,8 @@ files: - name: grade type: int - - path: ./course.csv - failDataPath: ./err/course.csv + - path: ./v2/course.csv + failDataPath: ./err/v2/course.csv batchSize: 2 inOrder: true type: csv @@ -71,8 +71,8 @@ files: - name: name type: string - - path: ./course-with-header.csv - failDataPath: ./err/course-with-header.csv + - path: ./v2/course-with-header.csv + failDataPath: ./err/v2/course-with-header.csv batchSize: 2 inOrder: true type: csv @@ -82,8 +82,8 @@ files: schema: type: vertex - - path: ./follow-with-label.csv - failDataPath: ./err/follow-with-label.csv + - path: ./v2/follow-with-label.csv + failDataPath: 
./err/v2/follow-with-label.csv batchSize: 2 inOrder: true type: csv @@ -106,8 +106,8 @@ files: type: double index: 1 - - path: ./follow-with-label-and-str-vid.csv - failDataPath: ./err/follow-with-label-and-str-vid.csv + - path: ./v2/follow-with-label-and-str-vid.csv + failDataPath: ./err/v2/follow-with-label-and-str-vid.csv batchSize: 2 inOrder: true type: csv @@ -121,10 +121,10 @@ files: withRanking: true srcVID: index: 0 - function: hash + # function: hash dstVID: index: 2 - function: hash + # function: hash rank: index: 3 props: @@ -132,8 +132,8 @@ files: type: double index: 1 - - path: ./follow.csv - failDataPath: ./err/follow.csv + - path: ./v2/follow.csv + failDataPath: ./err/v2/follow.csv batchSize: 2 type: csv csv: @@ -148,8 +148,8 @@ files: - name: likeness type: double - - path: ./follow-with-header.csv - failDataPath: ./err/follow-with-header.csv + - path: ./v2/follow-with-header.csv + failDataPath: ./err/v2/follow-with-header.csv batchSize: 2 type: csv csv: @@ -161,8 +161,8 @@ files: name: follow withRanking: true - - path: ./student.csv - failDataPath: ./err/student.csv + - path: ./v2/student.csv + failDataPath: ./err/v2/student.csv batchSize: 2 type: csv csv: @@ -181,8 +181,8 @@ files: - name: gender type: string - - path: ./student.csv - failDataPath: ./err/student_index.csv + - path: ./v2/student.csv + failDataPath: ./err/v2/student_index.csv batchSize: 2 type: csv csv: @@ -193,7 +193,7 @@ files: vertex: vid: index: 1 - function: hash + # function: hash tags: - name: student props: @@ -206,8 +206,8 @@ files: - name: gender type: string - - path: ./student-with-label-and-str-vid.csv - failDataPath: ./err/student_label_str_vid.csv + - path: ./v2/student-with-label-and-str-vid.csv + failDataPath: ./err/v2/student_label_str_vid.csv batchSize: 2 type: csv csv: @@ -218,7 +218,7 @@ files: vertex: vid: index: 1 - function: uuid + # function: uuid tags: - name: student props: @@ -231,8 +231,8 @@ files: - name: gender type: string - - path: ./follow.csv - failDataPath: ./err/follow_index.csv + - path: ./v2/follow.csv + failDataPath: ./err/v2/follow_index.csv batchSize: 2 limit: 3 type: csv @@ -245,10 +245,10 @@ files: name: follow srcVID: index: 0 - function: hash + # function: hash dstVID: index: 1 - function: uuid + # function: uuid rank: index: 2 props: @@ -256,8 +256,8 @@ files: type: double index: 3 - - path: ./follow-delimiter.csv - failDataPath: ./err/follow-delimiter.csv + - path: ./v2/follow-delimiter.csv + failDataPath: ./err/v2/follow-delimiter.csv batchSize: 2 type: csv csv: @@ -271,7 +271,7 @@ files: withRanking: true - path: https://raw.githubusercontent.com/vesoft-inc/nebula-importer/master/examples/follow.csv - failDataPath: ./err/follow_http.csv + failDataPath: ./err/v2/follow_http.csv batchSize: 2 limit: 3 type: csv @@ -284,10 +284,10 @@ files: name: follow srcVID: index: 0 - function: hash + # function: hash dstVID: index: 1 - function: uuid + # function: uuid rank: index: 2 props: @@ -295,8 +295,8 @@ files: type: double index: 3 - - path: ./course.csv - failDataPath: ./err/course-empty-props.csv + - path: ./v2/course.csv + failDataPath: ./err/v2/course-empty-props.csv batchSize: 2 inOrder: true type: csv @@ -312,8 +312,8 @@ files: tags: - name: course_no_props - - path: ./course.csv - failDataPath: ./err/course-multi-empty-props.csv + - path: ./v2/course.csv + failDataPath: ./err/v2/course-multi-empty-props.csv batchSize: 2 inOrder: true type: csv @@ -330,8 +330,8 @@ files: - name: course_no_props - name: building_no_props - - path: ./course.csv - failDataPath: 
./err/course-mix-empty-props.csv + - path: ./v2/course.csv + failDataPath: ./err/v2/course-mix-empty-props.csv batchSize: 2 inOrder: true type: csv @@ -352,8 +352,8 @@ files: type: string index: 3 - - path: ./course.csv - failDataPath: ./err/course-mix-empty-props-2.csv + - path: ./v2/course.csv + failDataPath: ./err/v2/course-mix-empty-props-2.csv batchSize: 2 inOrder: true type: csv @@ -375,8 +375,8 @@ files: - name: course_no_props - - path: ./follow.csv - failDataPath: ./err/follow-empty-props.csv + - path: ./v2/follow.csv + failDataPath: ./err/v2/follow-empty-props.csv batchSize: 2 type: csv csv: diff --git a/examples/choose.csv b/examples/v1/choose.csv similarity index 100% rename from examples/choose.csv rename to examples/v1/choose.csv diff --git a/examples/course-with-header.csv b/examples/v1/course-with-header.csv similarity index 100% rename from examples/course-with-header.csv rename to examples/v1/course-with-header.csv diff --git a/examples/course.csv b/examples/v1/course.csv similarity index 100% rename from examples/course.csv rename to examples/v1/course.csv diff --git a/examples/follow-delimiter.csv b/examples/v1/follow-delimiter.csv similarity index 100% rename from examples/follow-delimiter.csv rename to examples/v1/follow-delimiter.csv diff --git a/examples/follow-with-header.csv b/examples/v1/follow-with-header.csv similarity index 100% rename from examples/follow-with-header.csv rename to examples/v1/follow-with-header.csv diff --git a/examples/follow-with-label-and-str-vid.csv b/examples/v1/follow-with-label-and-str-vid.csv similarity index 100% rename from examples/follow-with-label-and-str-vid.csv rename to examples/v1/follow-with-label-and-str-vid.csv diff --git a/examples/follow-with-label.csv b/examples/v1/follow-with-label.csv similarity index 100% rename from examples/follow-with-label.csv rename to examples/v1/follow-with-label.csv diff --git a/examples/follow.csv b/examples/v1/follow.csv similarity index 100% rename from examples/follow.csv rename to examples/v1/follow.csv diff --git a/examples/student-with-label-and-str-vid.csv b/examples/v1/student-with-label-and-str-vid.csv similarity index 100% rename from examples/student-with-label-and-str-vid.csv rename to examples/v1/student-with-label-and-str-vid.csv diff --git a/examples/student.csv b/examples/v1/student.csv similarity index 100% rename from examples/student.csv rename to examples/v1/student.csv diff --git a/examples/v2/choose.csv b/examples/v2/choose.csv new file mode 100644 index 00000000..800951e9 --- /dev/null +++ b/examples/v2/choose.csv @@ -0,0 +1,4 @@ +x200,x101,5 +x200,y102,3 +y201,y102,3 +z202,y102,3 diff --git a/examples/v2/course-with-header.csv b/examples/v2/course-with-header.csv new file mode 100644 index 00000000..a4d75cfe --- /dev/null +++ b/examples/v2/course-with-header.csv @@ -0,0 +1,5 @@ +:LABEL,:VID(string),course.name,building.name:string,:IGNORE,course.credits:int ++,English,English,"No11 +B\",2,6 ++,Math,Math,No5,1,3 +-,Math,Math,No5,1,3 diff --git a/examples/v2/course.csv b/examples/v2/course.csv new file mode 100644 index 00000000..1deedbf3 --- /dev/null +++ b/examples/v2/course.csv @@ -0,0 +1,3 @@ +x101,Math,3,No5 +y102,English,6,No11 +"z103",Chinese,1,No1 diff --git a/examples/v2/follow-delimiter.csv b/examples/v2/follow-delimiter.csv new file mode 100644 index 00000000..9570ddb0 --- /dev/null +++ b/examples/v2/follow-delimiter.csv @@ -0,0 +1,4 @@ +:DST_VID(string)|follow.likeness:double|:SRC_VID(string)|:RANK +x201|92.5|y200|0 +y200|85.6|x201|1 +z202|93.2|x201|2 diff 
--git a/examples/v2/follow-with-header.csv b/examples/v2/follow-with-header.csv new file mode 100644 index 00000000..5ee62f16 --- /dev/null +++ b/examples/v2/follow-with-header.csv @@ -0,0 +1,4 @@ +:DST_VID(string),follow.likeness:double,:SRC_VID(string),:RANK +x201,92.5,y200,0 +y200,85.6,x201,1 +z202,93.2,x201,2 diff --git a/examples/v2/follow-with-label-and-str-vid.csv b/examples/v2/follow-with-label-and-str-vid.csv new file mode 100644 index 00000000..e0e8ec01 --- /dev/null +++ b/examples/v2/follow-with-label-and-str-vid.csv @@ -0,0 +1,5 @@ ++,一y201,92.5,x200,0 ++,一x200,85.6,y201,1 ++,一202,93.2,y201,2 +-,一y201,92.5,x200,0 +-,一x200,85.6,y201,1 diff --git a/examples/v2/follow-with-label.csv b/examples/v2/follow-with-label.csv new file mode 100644 index 00000000..ce3fd1ad --- /dev/null +++ b/examples/v2/follow-with-label.csv @@ -0,0 +1,5 @@ ++,x201,92.5,y200,0 ++,y200,85.6,x201,1 ++,z202,93.2,x201,2 +-,x201,92.5,y200,0 +-,y200,85.6,x201,1 diff --git a/examples/v2/follow.csv b/examples/v2/follow.csv new file mode 100644 index 00000000..f6a137ad --- /dev/null +++ b/examples/v2/follow.csv @@ -0,0 +1,4 @@ +x200,y201,0,92.5 +y201,x200,1,85.6 +y201,z202,2,93.2 +y201,z202,1,96.2 diff --git a/examples/v2/student-with-label-and-str-vid.csv b/examples/v2/student-with-label-and-str-vid.csv new file mode 100644 index 00000000..0ab2daa5 --- /dev/null +++ b/examples/v2/student-with-label-and-str-vid.csv @@ -0,0 +1,4 @@ ++,x200,Monica,16,female ++,y201,Mike,18,male ++,z202,Jane,17,female +-,y201,Mike,18,male diff --git a/examples/v2/student.csv b/examples/v2/student.csv new file mode 100644 index 00000000..497b3fb4 --- /dev/null +++ b/examples/v2/student.csv @@ -0,0 +1,3 @@ +x200,Monica,16,female +y201,Mike,18,male +z202,Jane,17,female diff --git a/go.mod b/go.mod index b5a80295..44462531 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/vesoft-inc/nebula-importer require ( - github.com/vesoft-inc/nebula-go v1.1.0 + github.com/vesoft-inc/nebula-clients/go v0.0.0-20201124065101-e86652852b4c gopkg.in/yaml.v2 v2.2.4 ) diff --git a/go.sum b/go.sum index f4e5c71c..7cfe49ec 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,18 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/facebook/fbthrift v0.0.0-20190922225929-2f9839604e25 h1:dezRDs9oGYxeavyvcNg/Js+dK6kIvfzERoJ7K8Xkv14= github.com/facebook/fbthrift v0.0.0-20190922225929-2f9839604e25/go.mod h1:2tncLx5rmw69e5kMBv/yJneERbzrr1yr5fdlnTbu8lU= -github.com/vesoft-inc/nebula-go v1.1.0 h1:o67dNlZphpCsVVkB4mk7ItqdG+1DvCfU0XTcB5mChDs= -github.com/vesoft-inc/nebula-go v1.1.0/go.mod h1:dM1R7vZjYCXZ20Kie4KYGS0UHxVDNYmDeMgb1PFPDmw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/vesoft-inc/nebula-clients/go v0.0.0-20201124065101-e86652852b4c h1:bDxhSBwdnOosMERABPRnq3/P3KoTtzNK6tx2eKqvhSI= +github.com/vesoft-inc/nebula-clients/go v0.0.0-20201124065101-e86652852b4c/go.mod h1:F/Qx0oOZ1IrUFU6Umn3l2Wqr1A9sRtHzZReDnzfOlp8= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/client/client.go b/pkg/client/client.go deleted file mode 100644 index f2a32e10..00000000 --- a/pkg/client/client.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "time" - - nebula "github.com/vesoft-inc/nebula-go" -) - -func NewNebulaConnection(addr, user, password string) (*nebula.GraphClient, error) { - opts := nebula.WithTimeout(10 * time.Second) - client, err := nebula.NewClient(addr, opts) - if err != nil { - return nil, err - } - - if err = client.Connect(user, password); err != nil { - return nil, err - } - return client, nil -} diff --git a/pkg/client/clientpool.go b/pkg/client/clientpool.go index a70c5dc6..00f580ed 100644 --- a/pkg/client/clientpool.go +++ b/pkg/client/clientpool.go @@ -2,11 +2,12 @@ package client import ( "fmt" + "strconv" "strings" "time" - nebula "github.com/vesoft-inc/nebula-go" - "github.com/vesoft-inc/nebula-go/nebula/graph" + nebula "github.com/vesoft-inc/nebula-clients/go" + "github.com/vesoft-inc/nebula-clients/go/nebula/graph" "github.com/vesoft-inc/nebula-importer/pkg/base" "github.com/vesoft-inc/nebula-importer/pkg/config" "github.com/vesoft-inc/nebula-importer/pkg/logger" @@ -19,33 +20,56 @@ type ClientPool struct { postStart *config.NebulaPostStart preStop *config.NebulaPreStop statsCh chan<- base.Stats - Conns []*nebula.GraphClient + pool *nebula.ConnectionPool + Sessions []*nebula.Session requestChs []chan base.ClientRequest } func NewClientPool(settings *config.NebulaClientSettings, statsCh chan<- base.Stats) (*ClientPool, error) { + addrs := strings.Split(*settings.Connection.Address, ",") + var hosts []nebula.HostAddress + for _, addr := range addrs { + hostPort := strings.Split(addr, ":") + if len(hostPort) != 2 { + return nil, fmt.Errorf("Invalid address: %s", addr) + } + port, err := strconv.Atoi(hostPort[1]) + if err != nil { + return nil, err + } + hostAddr := nebula.HostAddress{Host: hostPort[0], Port: port} + hosts = append(hosts, hostAddr) + } + conf := nebula.PoolConfig{ + TimeOut: 0, + IdleTime: 0, + MaxConnPoolSize: len(addrs) * *settings.Concurrency, + MinConnPoolSize: 1, + } + connPool, err := nebula.NewConnectionPool(hosts, conf, logger.NebulaLogger{}) + if err != nil { + return nil, err + } pool := ClientPool{ space: *settings.Space, postStart: settings.PostStart, preStop: settings.PreStop, statsCh: statsCh, + pool: connPool, } - addrs := strings.Split(*settings.Connection.Address, ",") pool.retry = *settings.Retry pool.concurrency = (*settings.Concurrency) * len(addrs) - pool.Conns = make([]*nebula.GraphClient, pool.concurrency) + pool.Sessions = make([]*nebula.Session, pool.concurrency) pool.requestChs = make([]chan base.ClientRequest, pool.concurrency) j := 0 - for _, addr := range addrs { + for k := 0; k < len(addrs); k++ { for i := 0; i < *settings.Concurrency; i++ { - if conn, err := NewNebulaConnection(strings.TrimSpace(addr), *settings.Connection.User, *settings.Connection.Password); err != nil { + if pool.Sessions[j], err = 
pool.pool.GetSession(*settings.Connection.User, *settings.Connection.Password); err != nil { return nil, err - } else { - pool.Conns[j] = conn - pool.requestChs[j] = make(chan base.ClientRequest, *settings.ChannelBufferSize) - j++ } + pool.requestChs[j] = make(chan base.ClientRequest, *settings.ChannelBufferSize) + j++ } } @@ -53,8 +77,8 @@ func NewClientPool(settings *config.NebulaClientSettings, statsCh chan<- base.St } func (p *ClientPool) getActiveConnIdx() int { - for i := range p.Conns { - if p.Conns[i] != nil { + for i := range p.Sessions { + if p.Sessions[i] != nil { return i } } @@ -65,7 +89,7 @@ func (p *ClientPool) exec(i int, stmt string) error { if len(stmt) == 0 { return nil } - resp, err := p.Conns[i].Execute(stmt) + resp, err := p.Sessions[i].Execute(stmt) if err != nil { return fmt.Errorf("Client(%d) fails to execute commands (%s), error: %s", i, stmt, err.Error()) } @@ -88,13 +112,14 @@ func (p *ClientPool) Close() { } for i := 0; i < p.concurrency; i++ { - if p.Conns[i] != nil { - p.Conns[i].Disconnect() + if p.Sessions[i] != nil { + p.Sessions[i].Release() } if p.requestChs[i] != nil { close(p.requestChs[i]) } } + p.pool.Close() } func (p *ClientPool) Init() error { @@ -106,11 +131,7 @@ func (p *ClientPool) Init() error { } } - stmt := fmt.Sprintf("USE `%s`;", p.space) for i := 0; i < p.concurrency; i++ { - if err := p.exec(i, stmt); err != nil { - return err - } go func(i int) { if p.postStart != nil { afterPeriod, _ := time.ParseDuration(*p.postStart.AfterPeriod) @@ -123,6 +144,11 @@ func (p *ClientPool) Init() error { } func (p *ClientPool) startWorker(i int) { + stmt := fmt.Sprintf("USE `%s`;", p.space) + if err := p.exec(i, stmt); err != nil { + logger.Error(err.Error()) + return + } for { data, ok := <-p.requestChs[i] if !ok { @@ -139,7 +165,7 @@ func (p *ClientPool) startWorker(i int) { var err error = nil var resp *graph.ExecutionResponse = nil for retry := p.retry; retry > 0; retry-- { - resp, err = p.Conns[i].Execute(data.Stmt) + resp, err = p.Sessions[i].Execute(data.Stmt) if err == nil && !nebula.IsError(resp) { break } diff --git a/pkg/cmd/cmd.go b/pkg/cmd/runner.go similarity index 100% rename from pkg/cmd/cmd.go rename to pkg/cmd/runner.go diff --git a/pkg/config/config.go b/pkg/config/config.go index b4cd33b6..2521e800 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -51,6 +51,7 @@ type Prop struct { type VID struct { Index *int `json:"index" yaml:"index"` Function *string `json:"function" yaml:"function"` + Type *string `json:"type" yaml:"type"` } type Rank struct { @@ -108,7 +109,14 @@ type YAMLConfig struct { Files []*File `json:"files" yaml:"files"` } -var supportedVersions []string = []string{"v1rc1", "v1rc2", "v1"} +var ( + kDefaultVidType = "string" + kDefaultConnAddr = "127.0.0.1:3699" + kDefaultUser = "root" + kDefaultPassword = "nebula" + kDefaultBatchSize = 128 + supportedVersions = []string{"v1rc1", "v1rc2", "v1", "v2"} +) func isSupportedVersion(ver string) bool { for _, v := range supportedVersions { @@ -236,20 +244,17 @@ func (n *NebulaClientSettings) validateAndReset(prefix string) error { func (c *NebulaClientConnection) validateAndReset(prefix string) error { if c.Address == nil { - a := "127.0.0.1:3699" - c.Address = &a + c.Address = &kDefaultConnAddr logger.Warnf("%s.address: %s", prefix, *c.Address) } if c.User == nil { - u := "user" - c.User = &u + c.User = &kDefaultUser logger.Warnf("%s.user: %s", prefix, *c.User) } if c.Password == nil { - p := "password" - c.Password = &p + c.Password = &kDefaultPassword 
logger.Warnf("%s.password: %s", prefix, *c.Password) } return nil @@ -302,8 +307,7 @@ func (f *File) validateAndReset(dir, prefix string) error { } if f.BatchSize == nil { - b := 128 - f.BatchSize = &b + f.BatchSize = &kDefaultBatchSize logger.Infof("Invalid batch size in file(%s), reset to %d", *f.Path, *f.BatchSize) } @@ -411,9 +415,17 @@ func (v *VID) ParseFunction(str string) (err error) { err = nil if i < 0 && j < 0 { v.Function = nil + v.Type = &kDefaultVidType } else if i > 0 && j > i { - function := strings.ToLower(str[i+1 : j]) - v.Function = &function + strs := strings.ToLower(str[i+1 : j]) + fnType := strings.Split(strs, ",") + if len(fnType) > 1 { + v.Function = &fnType[0] + v.Type = &fnType[1] + } else { + v.Function = nil + v.Type = &fnType[0] + } } else { err = fmt.Errorf("Invalid function format: %s", str) } @@ -422,9 +434,9 @@ func (v *VID) ParseFunction(str string) (err error) { func (v *VID) String(vid string) string { if v.Function == nil || *v.Function == "" { - return vid + return fmt.Sprintf("%s(%s)", vid, *v.Type) } else { - return fmt.Sprintf("%s(%s)", vid, *v.Function) + return fmt.Sprintf("%s(%s,%s)", vid, *v.Function, *v.Type) } } @@ -433,7 +445,15 @@ func (v *VID) FormatValue(record base.Record) (string, error) { return "", fmt.Errorf("vid index(%d) out of range record length(%d)", *v.Index, len(record)) } if v.Function == nil || *v.Function == "" { - return record[*v.Index], nil + vid := record[*v.Index] + if err := checkVidFormat(vid); err != nil { + return "", err + } + if *v.Type == "string" { + return fmt.Sprintf("%q", vid), nil + } else { + return vid, nil + } } else { return fmt.Sprintf("%s(%q)", *v.Function, record[*v.Index]), nil } @@ -442,6 +462,7 @@ func (v *VID) FormatValue(record base.Record) (string, error) { func (v *VID) checkFunction(prefix string) error { if v.Function != nil { switch strings.ToLower(*v.Function) { + // FIXME: uuid is not supported in nebula-graph-v2, and hash returns int which is not the valid vid type. 
case "", "hash", "uuid": default: return fmt.Errorf("Invalid %s.function: %s, only following values are supported: \"\", hash, uuid", prefix, *v.Function) @@ -460,6 +481,15 @@ func (v *VID) validateAndReset(prefix string, defaultVal int) error { if err := v.checkFunction(prefix); err != nil { return err } + if v.Type != nil { + vidType := strings.TrimSpace(strings.ToLower(*v.Type)) + if vidType != "string" && vidType != "int" { + return fmt.Errorf("vid type must be `string' or `int', now is %s", vidType) + } + } else { + v.Type = &kDefaultVidType + logger.Warnf("Not set %s.Type, reset to default value `%s'", prefix, *v.Type) + } return nil } @@ -473,7 +503,7 @@ func (r *Rank) validateAndReset(prefix string, defaultVal int) error { return nil } -var re = regexp.MustCompile(`^([+-]?\d+|hash\("(.+)"\)|uuid\("(.+)"\))$`) +var re = regexp.MustCompile(`^([+-]?\d+|hash\(".+"\)|uuid\(".+"\)|".+"|.+)$`) func checkVidFormat(vid string) error { if !re.MatchString(vid) { @@ -495,24 +525,13 @@ func (e *Edge) FormatValues(record base.Record) (string, error) { if e.Rank != nil && e.Rank.Index != nil { rank = fmt.Sprintf("@%s", record[*e.Rank.Index]) } - var srcVID string - if e.SrcVID.Function != nil { - //TODO(yee): differentiate string and integer column type, find and compare src/dst vertex column with property - srcVID = fmt.Sprintf("%s(%q)", *e.SrcVID.Function, record[*e.SrcVID.Index]) - } else { - srcVID = record[*e.SrcVID.Index] - if err := checkVidFormat(srcVID); err != nil { - return "", err - } + srcVID, err := e.SrcVID.FormatValue(record) + if err != nil { + return "", err } - var dstVID string - if e.DstVID.Function != nil { - dstVID = fmt.Sprintf("%s(%q)", *e.DstVID.Function, record[*e.DstVID.Index]) - } else { - dstVID = record[*e.DstVID.Index] - if err := checkVidFormat(dstVID); err != nil { - return "", err - } + dstVID, err := e.DstVID.FormatValue(record) + if err != nil { + return "", err } return fmt.Sprintf(" %s->%s%s:(%s) ", srcVID, dstVID, rank, strings.Join(cells, ",")), nil } @@ -582,7 +601,7 @@ func (e *Edge) validateAndReset(prefix string) error { } } else { index := 0 - e.SrcVID = &VID{Index: &index} + e.SrcVID = &VID{Index: &index, Type: &kDefaultVidType} } if e.DstVID != nil { if err := e.DstVID.validateAndReset(fmt.Sprintf("%s.dstVID", prefix), 1); err != nil { @@ -590,7 +609,7 @@ func (e *Edge) validateAndReset(prefix string) error { } } else { index := 1 - e.DstVID = &VID{Index: &index} + e.DstVID = &VID{Index: &index, Type: &kDefaultVidType} } start := 2 if e.Rank != nil { @@ -628,14 +647,9 @@ func (v *Vertex) FormatValues(record base.Record) (string, error) { cells = append(cells, str) } } - var vid string - if v.VID.Function != nil { - vid = fmt.Sprintf("%s(%q)", *v.VID.Function, record[*v.VID.Index]) - } else { - vid = record[*v.VID.Index] - if err := checkVidFormat(vid); err != nil { - return "", err - } + vid, err := v.VID.FormatValue(record) + if err != nil { + return "", err } return fmt.Sprintf(" %s: (%s)", vid, strings.Join(cells, ",")), nil } @@ -689,7 +703,7 @@ func (v *Vertex) validateAndReset(prefix string) error { } } else { index := 0 - v.VID = &VID{Index: &index} + v.VID = &VID{Index: &index, Type: &kDefaultVidType} } j := 1 for i := range v.Tags { diff --git a/pkg/logger/adapter.go b/pkg/logger/adapter.go new file mode 100644 index 00000000..7761c782 --- /dev/null +++ b/pkg/logger/adapter.go @@ -0,0 +1,23 @@ +package logger + +import ( + "fmt" +) + +type NebulaLogger struct{} + +func (l NebulaLogger) Info(msg string) { + infoWithSkip(2, 
fmt.Sprintf("[nebula-clients] %s", msg)) +} + +func (l NebulaLogger) Warn(msg string) { + warnWithSkip(2, fmt.Sprintf("[nebula-clients] %s", msg)) +} + +func (l NebulaLogger) Error(msg string) { + errorWithSkip(2, fmt.Sprintf("[nebula-clients] %s", msg)) +} + +func (l NebulaLogger) Fatal(msg string) { + fatalWithSkip(2, fmt.Sprintf("[nebula-clients] %s", msg)) +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 45c97deb..65fae0dc 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -20,81 +20,73 @@ func Init(path string) { } func Info(v ...interface{}) { - _, file, no, ok := runtime.Caller(1) - if ok { - file = filepath.Base(file) - logger.Printf("[INFO] %s:%d: %s", file, no, fmt.Sprint(v...)) - } else { - logger.Fatalf("Fail to get caller info of logger.Info") - } + infoWithSkip(2, fmt.Sprint(v...)) } func Infof(format string, v ...interface{}) { - _, file, no, ok := runtime.Caller(1) - if ok { - file = filepath.Base(file) - logger.Printf("[INFO] %s:%d: %s", file, no, fmt.Sprintf(format, v...)) - } else { - logger.Fatalf("Fail to get caller info of logger.Infof") - } + infoWithSkip(2, fmt.Sprintf(format, fmt.Sprintf(format, v...))) } func Warn(v ...interface{}) { - _, file, no, ok := runtime.Caller(1) - if ok { - file = filepath.Base(file) - logger.Printf("[WARN] %s:%d: %s", file, no, fmt.Sprint(v...)) - } else { - logger.Fatalf("Fail to get caller info of logger.Warn") - } + warnWithSkip(2, fmt.Sprint(v...)) } func Warnf(format string, v ...interface{}) { - _, file, no, ok := runtime.Caller(1) - if ok { - file = filepath.Base(file) - logger.Printf("[WARN] %s:%d: %s", file, no, fmt.Sprintf(format, v...)) - } else { - logger.Fatalf("Fail to get caller info of logger.Warnf") - } + warnWithSkip(2, fmt.Sprintf(format, v...)) } func Error(v ...interface{}) { - _, file, no, ok := runtime.Caller(1) + errorWithSkip(2, fmt.Sprint(v...)) +} + +func Errorf(format string, v ...interface{}) { + errorWithSkip(2, fmt.Sprintf(format, v...)) +} + +func Fatal(v ...interface{}) { + fatalWithSkip(2, fmt.Sprint(v...)) +} + +func Fatalf(format string, v ...interface{}) { + fatalWithSkip(2, fmt.Sprintf(format, v...)) +} + +func infoWithSkip(skip int, msg string) { + _, file, no, ok := runtime.Caller(skip) if ok { file = filepath.Base(file) - logger.Printf("[ERROR] %s:%d: %s", file, no, fmt.Sprint(v...)) + logger.Printf("[INFO] %s:%d: %s", file, no, msg) } else { - logger.Fatalf("Fail to get caller info of logger.Error") + logger.Fatalf("Fail to get caller info of logger.Info") } } -func Errorf(format string, v ...interface{}) { - _, file, no, ok := runtime.Caller(1) +func warnWithSkip(skip int, msg string) { + _, file, no, ok := runtime.Caller(skip) if ok { file = filepath.Base(file) - logger.Printf("[ERROR] %s:%d: %s", file, no, fmt.Sprintf(format, v...)) + logger.Printf("[WARN] %s:%d: %s", file, no, msg) } else { - logger.Fatalf("Fail to get caller info of logger.Errorf") + logger.Fatalf("Fail to get caller info of logger.Warn") } } -func Fatal(v ...interface{}) { - _, file, no, ok := runtime.Caller(1) +func errorWithSkip(skip int, msg string) { + _, file, no, ok := runtime.Caller(skip) if ok { file = filepath.Base(file) - logger.Fatalf("[FATAL] %s:%d: %s", file, no, fmt.Sprint(v...)) + logger.Printf("[ERROR] %s:%d: %s", file, no, msg) } else { - logger.Fatalf("Fail to get caller info of logger.Fatal") + logger.Fatalf("Fail to get caller info of logger.Error") } } -func Fatalf(format string, v ...interface{}) { - _, file, no, ok := runtime.Caller(1) +func fatalWithSkip(skip int, msg 
string) { + _, file, no, ok := runtime.Caller(skip) if ok { file = filepath.Base(file) - logger.Fatalf("[FATAL] %s:%d: %s", file, no, fmt.Sprintf(format, v...)) + logger.Fatalf("[FATAL] %s:%d: %s", file, no, msg) } else { - logger.Fatalf("Fail to get caller info of logger.Fatalf") + logger.Fatalf("Fail to get caller info of logger.Fatal") } } diff --git a/vendor/github.com/facebook/fbthrift/LICENSE b/vendor/github.com/facebook/fbthrift/LICENSE deleted file mode 100644 index 25187a4b..00000000 --- a/vendor/github.com/facebook/fbthrift/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Facebook - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/README.md b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/README.md deleted file mode 100644 index 42900086..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Facebook Thrift Go --------------------------------------------- - -The files in this repository is only for the Go libraries of fbthrift, see [facebook/fbthrift](https://github.com/facebook/fbthrift) for the remaining code. diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/application_exception.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/application_exception.go deleted file mode 100644 index c82ffc70..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/application_exception.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 -) - -// ApplicationException is an application level Thrift exception -type ApplicationException interface { - Exception - TypeID() int32 - Read(iprot Protocol) (ApplicationException, error) - Write(oprot Protocol) error -} - -type applicationException struct { - message string - exceptionType int32 -} - -func (e applicationException) Error() string { - return e.message -} - -// NewApplicationException creates a new ApplicationException -func NewApplicationException(exceptionType int32, message string) ApplicationException { - return &applicationException{message, exceptionType} -} - -// TypeID returns the exception type -func (e *applicationException) TypeID() int32 { - return e.exceptionType -} - -// Read reads an ApplicationException from the protocol -func (e *applicationException) Read(iprot Protocol) (ApplicationException, error) { - _, err := iprot.ReadStructBegin() - if err != nil { - return nil, err - } - - message := "" - exceptionType := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin() - if err != nil { - return nil, err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(); err != nil { - return nil, err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - case 2: - if ttype == I32 { - if exceptionType, err = iprot.ReadI32(); err != nil { - return nil, err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - default: - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - if err = iprot.ReadFieldEnd(); err != nil { - return nil, err - } - } - return NewApplicationException(exceptionType, message), iprot.ReadStructEnd() -} - -// Write writes an exception to the protocol -func (e *applicationException) Write(oprot Protocol) (err error) { - err = oprot.WriteStructBegin("TApplicationException") - if len(e.Error()) > 0 { - err = oprot.WriteFieldBegin("message", STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(e.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - } - err = oprot.WriteFieldBegin("type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(e.exceptionType) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - err = oprot.WriteFieldStop() - if err != nil { - return - } - err = oprot.WriteStructEnd() - return -} - -// sendException is a utility function to send the exception for the specified -// method. 
-func sendException(oprot Protocol, name string, seqID int32, err ApplicationException) error { - if e2 := oprot.WriteMessageBegin(name, EXCEPTION, seqID); e2 != nil { - return e2 - } else if e2 := err.Write(oprot); e2 != nil { - return e2 - } else if e2 := oprot.WriteMessageEnd(); e2 != nil { - return e2 - } else if e2 := oprot.Flush(); e2 != nil { - return e2 - } - return nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/binary_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/binary_protocol.go deleted file mode 100644 index 7e5efcac..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const BinaryVersionMask uint32 = 0xffff0000 -const BinaryVersion1 uint32 = 0x80010000 - -type BinaryProtocol struct { - trans RichTransport - origTransport Transport - reader io.Reader - writer io.Writer - strictRead bool - strictWrite bool - buffer [64]byte -} - -type BinaryProtocolFactory struct { - strictRead bool - strictWrite bool -} - -func NewBinaryProtocolTransport(t Transport) *BinaryProtocol { - return NewBinaryProtocol(t, false, true) -} - -func NewBinaryProtocol(t Transport, strictRead, strictWrite bool) *BinaryProtocol { - p := &BinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} - if et, ok := t.(RichTransport); ok { - p.trans = et - } else { - p.trans = NewRichTransport(t) - } - p.reader = p.trans - p.writer = p.trans - return p -} - -func NewBinaryProtocolFactoryDefault() *BinaryProtocolFactory { - return NewBinaryProtocolFactory(false, true) -} - -func NewBinaryProtocolFactory(strictRead, strictWrite bool) *BinaryProtocolFactory { - return &BinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} -} - -func (p *BinaryProtocolFactory) GetProtocol(t Transport) Protocol { - return NewBinaryProtocol(t, p.strictRead, p.strictWrite) -} - -/** - * Writing Methods - */ - -func (p *BinaryProtocol) WriteMessageBegin(name string, typeId MessageType, seqId int32) error { - if p.strictWrite { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(int32(version)) - if e != nil { - return e - } - e = p.WriteString(name) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } else { - e := p.WriteString(name) - if e != nil { - return e - } - e = p.WriteByte(byte(typeId)) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } -} - -func (p *BinaryProtocol) WriteMessageEnd() error { - return nil -} - -func (p *BinaryProtocol) WriteStructBegin(name string) error { - 
return nil -} - -func (p *BinaryProtocol) WriteStructEnd() error { - return nil -} - -func (p *BinaryProtocol) WriteFieldBegin(name string, typeId Type, id int16) error { - e := p.WriteByte(byte(typeId)) - if e != nil { - return e - } - e = p.WriteI16(id) - return e -} - -func (p *BinaryProtocol) WriteFieldEnd() error { - return nil -} - -func (p *BinaryProtocol) WriteFieldStop() error { - e := p.WriteByte(STOP) - return e -} - -func (p *BinaryProtocol) WriteMapBegin(keyType Type, valueType Type, size int) error { - e := p.WriteByte(byte(keyType)) - if e != nil { - return e - } - e = p.WriteByte(byte(valueType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *BinaryProtocol) WriteMapEnd() error { - return nil -} - -func (p *BinaryProtocol) WriteListBegin(elemType Type, size int) error { - e := p.WriteByte(byte(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *BinaryProtocol) WriteListEnd() error { - return nil -} - -func (p *BinaryProtocol) WriteSetBegin(elemType Type, size int) error { - e := p.WriteByte(byte(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *BinaryProtocol) WriteSetEnd() error { - return nil -} - -func (p *BinaryProtocol) WriteBool(value bool) error { - if value { - return p.WriteByte(1) - } - return p.WriteByte(0) -} - -func (p *BinaryProtocol) WriteByte(value byte) error { - e := p.trans.WriteByte(value) - return NewProtocolException(e) -} - -func (p *BinaryProtocol) WriteI16(value int16) error { - v := p.buffer[0:2] - binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.writer.Write(v) - return NewProtocolException(e) -} - -func (p *BinaryProtocol) WriteI32(value int32) error { - v := p.buffer[0:4] - binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.writer.Write(v) - return NewProtocolException(e) -} - -func (p *BinaryProtocol) WriteI64(value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.writer.Write(v) - return NewProtocolException(err) -} - -func (p *BinaryProtocol) WriteDouble(value float64) error { - return p.WriteI64(int64(math.Float64bits(value))) -} - -func (p *BinaryProtocol) WriteFloat(value float32) error { - return p.WriteI32(int32(math.Float32bits(value))) -} - -func (p *BinaryProtocol) WriteString(value string) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewProtocolException(err) -} - -func (p *BinaryProtocol) WriteBinary(value []byte) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.writer.Write(value) - return NewProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *BinaryProtocol) ReadMessageBegin() (name string, typeId MessageType, seqId int32, err error) { - size, e := p.ReadI32() - if e != nil { - return "", typeId, 0, NewProtocolException(e) - } - if size < 0 { - typeId = MessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString() - if e != nil { - return name, typeId, seqId, NewProtocolException(e) - } - seqId, e = p.ReadI32() - if e != nil { - return name, typeId, seqId, NewProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.strictRead { - return name, typeId, seqId, NewProtocolExceptionWithType(BAD_VERSION, 
fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte() - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = MessageType(b) - seqId, e4 := p.ReadI32() - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *BinaryProtocol) ReadMessageEnd() error { - return nil -} - -func (p *BinaryProtocol) ReadStructBegin() (name string, err error) { - return -} - -func (p *BinaryProtocol) ReadStructEnd() error { - return nil -} - -func (p *BinaryProtocol) ReadFieldBegin() (name string, typeId Type, seqId int16, err error) { - t, err := p.ReadByte() - typeId = Type(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16() - } - return name, typeId, seqId, err -} - -func (p *BinaryProtocol) ReadFieldEnd() error { - return nil -} - -var invalidDataLength = NewProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *BinaryProtocol) ReadMapBegin() (kType, vType Type, size int, err error) { - k, e := p.ReadByte() - if e != nil { - err = NewProtocolException(e) - return - } - kType = Type(k) - v, e := p.ReadByte() - if e != nil { - err = NewProtocolException(e) - return - } - vType = Type(v) - size32, e := p.ReadI32() - if e != nil { - err = NewProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *BinaryProtocol) ReadMapEnd() error { - return nil -} - -func (p *BinaryProtocol) ReadListBegin() (elemType Type, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewProtocolException(e) - return - } - elemType = Type(b) - size32, e := p.ReadI32() - if e != nil { - err = NewProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *BinaryProtocol) ReadListEnd() error { - return nil -} - -func (p *BinaryProtocol) ReadSetBegin() (elemType Type, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewProtocolException(e) - return - } - elemType = Type(b) - size32, e := p.ReadI32() - if e != nil { - err = NewProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *BinaryProtocol) ReadSetEnd() error { - return nil -} - -func (p *BinaryProtocol) ReadBool() (bool, error) { - b, e := p.ReadByte() - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *BinaryProtocol) ReadByte() (byte, error) { - v, err := p.trans.ReadByte() - return byte(v), err -} - -func (p *BinaryProtocol) ReadI16() (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *BinaryProtocol) ReadI32() (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *BinaryProtocol) ReadI64() (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *BinaryProtocol) ReadDouble() (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *BinaryProtocol) ReadFloat() (value float32, err error) { - buf := 
p.buffer[0:4] - err = p.readAll(buf) - value = math.Float32frombits(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *BinaryProtocol) ReadString() (value string, err error) { - size, e := p.ReadI32() - if e != nil { - return "", e - } - if size < 0 { - err = invalidDataLength - return - } - - return p.readStringBody(size) -} - -func (p *BinaryProtocol) ReadBinary() ([]byte, error) { - size, e := p.ReadI32() - if e != nil { - return nil, e - } - if size < 0 { - return nil, invalidDataLength - } - if uint64(size) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - isize := int(size) - buf := make([]byte, isize) - _, err := io.ReadFull(p.trans, buf) - return buf, NewProtocolException(err) -} - -func (p *BinaryProtocol) Flush() (err error) { - return NewProtocolException(p.trans.Flush()) -} - -func (p *BinaryProtocol) Skip(fieldType Type) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *BinaryProtocol) Transport() Transport { - return p.origTransport -} - -func (p *BinaryProtocol) readAll(buf []byte) error { - _, err := io.ReadFull(p.reader, buf) - return NewProtocolException(err) -} - -func (p *BinaryProtocol) readStringBody(size int32) (value string, err error) { - if size < 0 { - return "", nil - } - if uint64(size) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - var buf []byte - if int(size) <= len(p.buffer) { - buf = p.buffer[0:size] - } else { - buf = make([]byte, size) - } - _, e := io.ReadFull(p.trans, buf) - return string(buf), NewProtocolException(e) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/buffered_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/buffered_transport.go deleted file mode 100644 index cf590bf1..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/buffered_transport.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -import ( - "bufio" -) - -type BufferedTransportFactory struct { - size int -} - -type BufferedTransport struct { - bufio.ReadWriter - tp Transport -} - -func (p *BufferedTransportFactory) GetTransport(trans Transport) Transport { - return NewBufferedTransport(trans, p.size) -} - -func NewBufferedTransportFactory(bufferSize int) *BufferedTransportFactory { - return &BufferedTransportFactory{size: bufferSize} -} - -func NewBufferedTransport(trans Transport, bufferSize int) *BufferedTransport { - return &BufferedTransport{ - ReadWriter: bufio.ReadWriter{ - Reader: bufio.NewReaderSize(trans, bufferSize), - Writer: bufio.NewWriterSize(trans, bufferSize), - }, - tp: trans, - } -} - -func (p *BufferedTransport) IsOpen() bool { - return p.tp.IsOpen() -} - -func (p *BufferedTransport) Open() (err error) { - return p.tp.Open() -} - -func (p *BufferedTransport) Close() (err error) { - return p.tp.Close() -} - -func (p *BufferedTransport) Read(b []byte) (int, error) { - n, err := p.ReadWriter.Read(b) - if err != nil { - p.ReadWriter.Reader.Reset(p.tp) - } - return n, err -} - -func (p *BufferedTransport) Write(b []byte) (int, error) { - n, err := p.ReadWriter.Write(b) - if err != nil { - p.ReadWriter.Writer.Reset(p.tp) - } - return n, err -} - -func (p *BufferedTransport) Flush() error { - if err := p.ReadWriter.Flush(); err != nil { - p.ReadWriter.Writer.Reset(p.tp) - return err - } - return p.tp.Flush() -} - -func (p *BufferedTransport) RemainingBytes() (num_bytes uint64) { - return p.tp.RemainingBytes() -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/client_interface.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/client_interface.go deleted file mode 100644 index e6c3b872..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/client_interface.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package thrift - -// ClientInterface specifies the common methods every thrift client -// should implement -type ClientInterface interface { - Open() error - Close() error - IsOpen() bool -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/clientconn.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/clientconn.go deleted file mode 100644 index 0b6e7b27..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/clientconn.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package thrift - -import ( - "fmt" -) - -// ClientConn holds all the connection information for a thrift client -type ClientConn struct { - transport Transport - protocolFactory ProtocolFactory - iproto Protocol - oproto Protocol - seqID int32 -} - -// Transport returns the underlying Transport object inside the ClientConn -// object -func (cc *ClientConn) Transport() Transport { - return cc.transport -} - -// NewClientConn creates a new ClientConn object using the provided ProtocolFactory -func NewClientConn(t Transport, pf ProtocolFactory) ClientConn { - return ClientConn{ - transport: t, - protocolFactory: pf, - iproto: pf.GetProtocol(t), - oproto: pf.GetProtocol(t), - } -} - -// NewClientConnWithProtocols creates a new ClientConn object using the input and output protocols provided -func NewClientConnWithProtocols(t Transport, iproto, oproto Protocol) ClientConn { - return ClientConn{ - transport: t, - protocolFactory: nil, - iproto: iproto, - oproto: oproto, - } -} - -// IRequest represents a request to be sent to a thrift endpoint -type IRequest interface { - Write(p Protocol) error -} - -// IResponse represents a response received from a thrift call -type IResponse interface { - Read(p Protocol) error -} - -// Open opens the client connection -func (cc *ClientConn) Open() error { - return cc.transport.Open() -} - -// Close closes the client connection -func (cc *ClientConn) Close() error { - return cc.transport.Close() -} - -// IsOpen return true if the client connection is open; otherwise, it returns false. -func (cc *ClientConn) IsOpen() bool { - return cc.transport.IsOpen() -} - -// SendMsg sends a request to a given thrift endpoint -func (cc *ClientConn) SendMsg(method string, req IRequest, msgType MessageType) error { - cc.seqID++ - - if err := cc.oproto.WriteMessageBegin(method, msgType, cc.seqID); err != nil { - return err - } - - if err := req.Write(cc.oproto); err != nil { - return err - } - - if err := cc.oproto.WriteMessageEnd(); err != nil { - return err - } - - return cc.oproto.Flush() -} - -// RecvMsg receives the response from a call to a thrift endpoint -func (cc *ClientConn) RecvMsg(method string, res IResponse) error { - recvMethod, mTypeID, seqID, err := cc.iproto.ReadMessageBegin() - - if err != nil { - return err - } - - if method != recvMethod { - return NewApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s failed: wrong method name", method)) - } - - if cc.seqID != seqID { - return NewApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s failed: out of sequence response", method)) - } - - switch mTypeID { - case REPLY: - if err := res.Read(cc.iproto); err != nil { - return err - } - - return cc.iproto.ReadMessageEnd() - case EXCEPTION: - err := NewApplicationException(UNKNOWN_APPLICATION_EXCEPTION, "Unknown exception") - - recvdErr, readErr := err.Read(cc.iproto) - - if readErr != nil { - return readErr - } - - if msgEndErr := cc.iproto.ReadMessageEnd(); msgEndErr != nil { - return msgEndErr - } - return recvdErr - default: - return NewApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s failed: invalid message type", method)) - } -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/compact_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/compact_protocol.go deleted file mode 100644 index 347ebd7a..00000000 --- 
a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/compact_protocol.go +++ /dev/null @@ -1,859 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 0x01 - COMPACT_VERSION_BE = 0x02 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type compactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C - COMPACT_FLOAT = 0x0D -) - -var ( - typeToCompactType map[Type]compactType -) - -func init() { - typeToCompactType = map[Type]compactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - FLOAT: COMPACT_FLOAT, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type CompactProtocolFactory struct{} - -func NewCompactProtocolFactory() *CompactProtocolFactory { - return &CompactProtocolFactory{} -} - -func (p *CompactProtocolFactory) GetProtocol(trans Transport) Protocol { - return NewCompactProtocol(trans) -} - -type CompactProtocol struct { - trans RichTransport - origTransport Transport - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the Field here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. - boolValue bool - boolValueIsNotNull bool - buffer [64]byte - - version int -} - -// Create a CompactProtocol given a Transport -func NewCompactProtocol(trans Transport) *CompactProtocol { - p := &CompactProtocol{origTransport: trans, lastField: []int{}, version: COMPACT_VERSION_BE} - if et, ok := trans.(RichTransport); ok { - p.trans = et - } else { - p.trans = NewRichTransport(trans) - } - - return p -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. 
-func (p *CompactProtocol) WriteMessageBegin(name string, typeId MessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewProtocolException(err) - } - err = p.writeByteDirect((byte(p.version) & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewProtocolException(err) - } - e := p.WriteString(name) - return e - -} - -func (p *CompactProtocol) WriteMessageEnd() error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *CompactProtocol) WriteStructBegin(name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. -func (p *CompactProtocol) WriteStructEnd() error { - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *CompactProtocol) WriteFieldBegin(name string, typeId Type, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) - return NewProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *CompactProtocol) writeFieldBeginInternal(name string, typeId Type, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. - var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - // p.lastField.Push(field.id); - return written, nil -} - -func (p *CompactProtocol) WriteFieldEnd() error { return nil } - -func (p *CompactProtocol) WriteFieldStop() error { - err := p.writeByteDirect(STOP) - return NewProtocolException(err) -} - -func (p *CompactProtocol) WriteMapBegin(keyType Type, valueType Type, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return NewProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewProtocolException(err) -} - -func (p *CompactProtocol) WriteMapEnd() error { return nil } - -// Write a list header. 
-func (p *CompactProtocol) WriteListBegin(elemType Type, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewProtocolException(err) -} - -func (p *CompactProtocol) WriteListEnd() error { return nil } - -// Write a set header. -func (p *CompactProtocol) WriteSetBegin(elemType Type, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewProtocolException(err) -} - -func (p *CompactProtocol) WriteSetEnd() error { return nil } - -func (p *CompactProtocol) WriteBool(value bool) error { - v := byte(COMPACT_BOOLEAN_FALSE) - if value { - v = byte(COMPACT_BOOLEAN_TRUE) - } - if p.booleanFieldPending { - // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) - p.booleanFieldPending = false - return NewProtocolException(err) - } - // we're not part of a field, so just write the value. - err := p.writeByteDirect(v) - return NewProtocolException(err) -} - -// Write a byte. Nothing to see here! -func (p *CompactProtocol) WriteByte(value byte) error { - err := p.writeByteDirect(value) - return NewProtocolException(err) -} - -// Write an I16 as a zigzag varint. -func (p *CompactProtocol) WriteI16(value int16) error { - _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) - return NewProtocolException(err) -} - -// Write an i32 as a zigzag varint. -func (p *CompactProtocol) WriteI32(value int32) error { - _, err := p.writeVarint32(p.int32ToZigzag(value)) - return NewProtocolException(err) -} - -// Write an i64 as a zigzag varint. -func (p *CompactProtocol) WriteI64(value int64) error { - _, err := p.writeVarint64(p.int64ToZigzag(value)) - return NewProtocolException(err) -} - -// Write a double to the wire as 8 bytes. -func (p *CompactProtocol) WriteDouble(value float64) error { - buf := p.buffer[0:8] - if p.version == COMPACT_VERSION { - binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) - } else { - binary.BigEndian.PutUint64(buf, math.Float64bits(value)) - } - _, err := p.trans.Write(buf) - return NewProtocolException(err) -} - -// Write a float to the wire as 4 bytes. -func (p *CompactProtocol) WriteFloat(value float32) error { - buf := p.buffer[0:4] - binary.BigEndian.PutUint32(buf, math.Float32bits(value)) - _, err := p.trans.Write(buf) - return NewProtocolException(err) -} - -// Write a string to the wire with a varint size preceding. -func (p *CompactProtocol) WriteString(value string) error { - _, e := p.writeVarint32(int32(len(value))) - if e != nil { - return NewProtocolException(e) - } - if len(value) > 0 { - } - _, e = p.trans.WriteString(value) - return e -} - -// Write a byte array, using a varint for the size. -func (p *CompactProtocol) WriteBinary(bin []byte) error { - _, e := p.writeVarint32(int32(len(bin))) - if e != nil { - return NewProtocolException(e) - } - if len(bin) > 0 { - _, e = p.trans.Write(bin) - return NewProtocolException(e) - } - return nil -} - -// -// Reading methods. -// - -// Read a message header. 
-func (p *CompactProtocol) ReadMessageBegin() (name string, typeId MessageType, seqId int32, err error) { - - protocolId, err := p.readByteDirect() - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = MessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version == COMPACT_VERSION || version == COMPACT_VERSION_BE { - p.version = int(version) - } else { - e := fmt.Errorf("Expected version %02x or %02x but got %02x", COMPACT_VERSION, COMPACT_VERSION_BE, version) - err = NewProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewProtocolException(e) - return - } - name, err = p.ReadString() - return -} - -func (p *CompactProtocol) ReadMessageEnd() error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *CompactProtocol) ReadStructBegin() (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *CompactProtocol) ReadStructEnd() error { - // consume the last field we read off the wire. - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *CompactProtocol) ReadFieldBegin() (name string, typeId Type, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. - if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. - modifier := int16((t & 0xf0) >> 4) - if modifier == 0 { - // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16() - if err != nil { - return - } - } else { - // has a delta. add the delta to the last read field id. - id = int16(p.lastFieldId) + modifier - } - typeId, e := p.getType(compactType(t & 0x0f)) - if e != nil { - err = NewProtocolException(e) - return - } - - // if this happens to be a boolean field, the value is encoded in the type - if p.isBoolType(t) { - // save the boolean value in a special instance variable. - p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) - p.boolValueIsNotNull = true - } - - // push the new field onto the field stack so we can keep the deltas going. - p.lastFieldId = int(id) - return -} - -func (p *CompactProtocol) ReadFieldEnd() error { return nil } - -// Read a map header off the wire. If the size is zero, skip reading the key -// and value type. This means that 0-length maps will yield Maps without the -// "correct" types. 
-func (p *CompactProtocol) ReadMapBegin() (keyType Type, valueType Type, size int, err error) { - size32, e := p.readVarint32() - if e != nil { - err = NewProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - keyAndValueType := byte(STOP) - if size != 0 { - keyAndValueType, err = p.readByteDirect() - if err != nil { - return - } - } - keyType, _ = p.getType(compactType(keyAndValueType >> 4)) - valueType, _ = p.getType(compactType(keyAndValueType & 0xf)) - return -} - -func (p *CompactProtocol) ReadMapEnd() error { return nil } - -// Read a list header off the wire. If the list size is 0-14, the size will -// be packed into the element type header. If it's a longer list, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *CompactProtocol) ReadListBegin() (elemType Type, size int, err error) { - size_and_type, err := p.readByteDirect() - if err != nil { - return - } - size = int((size_and_type >> 4) & 0x0f) - if size == 15 { - size2, e := p.readVarint32() - if e != nil { - err = NewProtocolException(e) - return - } - if size2 < 0 { - err = invalidDataLength - return - } - size = int(size2) - } - elemType, e := p.getType(compactType(size_and_type)) - if e != nil { - err = NewProtocolException(e) - return - } - return -} - -func (p *CompactProtocol) ReadListEnd() error { return nil } - -// Read a set header off the wire. If the set size is 0-14, the size will -// be packed into the element type header. If it's a longer set, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *CompactProtocol) ReadSetBegin() (elemType Type, size int, err error) { - return p.ReadListBegin() -} - -func (p *CompactProtocol) ReadSetEnd() error { return nil } - -// Read a boolean off the wire. If this is a boolean field, the value should -// already have been read during readFieldBegin, so we'll just consume the -// pre-stored value. Otherwise, read a byte. -func (p *CompactProtocol) ReadBool() (value bool, err error) { - if p.boolValueIsNotNull { - p.boolValueIsNotNull = false - return p.boolValue, nil - } - v, err := p.readByteDirect() - return v == COMPACT_BOOLEAN_TRUE, err -} - -// Read a single byte off the wire. Nothing interesting here. -func (p *CompactProtocol) ReadByte() (byte, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewProtocolException(err) - } - return v, err -} - -// Read an i16 from the wire as a zigzag varint. -func (p *CompactProtocol) ReadI16() (value int16, err error) { - v, err := p.ReadI32() - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *CompactProtocol) ReadI32() (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *CompactProtocol) ReadI64() (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. 
-func (p *CompactProtocol) ReadDouble() (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewProtocolException(e) - } - if p.version == COMPACT_VERSION { - return math.Float64frombits(binary.LittleEndian.Uint64(longBits)), nil - } else { - return math.Float64frombits(binary.BigEndian.Uint64(longBits)), nil - } -} - -// No magic here - just read a float off the wire. -func (p *CompactProtocol) ReadFloat() (value float32, err error) { - bits := p.buffer[0:4] - _, e := io.ReadFull(p.trans, bits) - if e != nil { - return 0.0, NewProtocolException(e) - } - return math.Float32frombits(binary.BigEndian.Uint32(bits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *CompactProtocol) ReadString() (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewProtocolException(e) - } - if length < 0 { - return "", invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - if length == 0 { - return "", nil - } - var buf []byte - if length <= int32(len(p.buffer)) { - buf = p.buffer[0:length] - } else { - buf = make([]byte, length) - } - _, e = io.ReadFull(p.trans, buf) - return string(buf), NewProtocolException(e) -} - -// Read a []byte from the wire. -func (p *CompactProtocol) ReadBinary() (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewProtocolException(e) - } - if length == 0 { - return []byte{}, nil - } - if length < 0 { - return nil, invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - buf := make([]byte, length) - _, e = io.ReadFull(p.trans, buf) - return buf, NewProtocolException(e) -} - -func (p *CompactProtocol) Flush() (err error) { - return NewProtocolException(p.trans.Flush()) -} - -func (p *CompactProtocol) Skip(fieldType Type) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *CompactProtocol) Transport() Transport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *CompactProtocol) writeCollectionBegin(elemType Type, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? -func (p *CompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. 
-func (p *CompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *CompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *CompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -func (p *CompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { - binary.LittleEndian.PutUint64(buf, n) -} - -func (p *CompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { - binary.LittleEndian.PutUint64(buf, uint64(n)) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *CompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// Writes a byte without any possibility of all that field header nonsense. -func (p *CompactProtocol) writeIntAsByteDirect(n int) (int, error) { - return 1, p.writeByteDirect(byte(n)) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *CompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. -func (p *CompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *CompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *CompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *CompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -func (p *CompactProtocol) bytesToInt32(b []byte) int32 { - return int32(binary.LittleEndian.Uint32(b)) -} - -func (p *CompactProtocol) bytesToUint32(b []byte) uint32 { - return binary.LittleEndian.Uint32(b) - -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *CompactProtocol) bytesToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(b)) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. 
-func (p *CompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *CompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a compactType constant, convert it to its corresponding -// Type value. -func (p *CompactProtocol) getType(t compactType) (Type, error) { - switch byte(t) & 0x0f { - case STOP: - return STOP, nil - case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: - return BOOL, nil - case COMPACT_BYTE: - return BYTE, nil - case COMPACT_I16: - return I16, nil - case COMPACT_I32: - return I32, nil - case COMPACT_I64: - return I64, nil - case COMPACT_DOUBLE: - return DOUBLE, nil - case COMPACT_FLOAT: - return FLOAT, nil - case COMPACT_BINARY: - return STRING, nil - case COMPACT_LIST: - return LIST, nil - case COMPACT_SET: - return SET, nil - case COMPACT_MAP: - return MAP, nil - case COMPACT_STRUCT: - return STRUCT, nil - } - return STOP, Exception(fmt.Errorf("don't know what type: %#x", t&0x0f)) -} - -// Given a Type value, find the appropriate CompactProtocol.Types constant. -func (p *CompactProtocol) getCompactType(t Type) compactType { - return typeToCompactType[t] -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/concurrent_server.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/concurrent_server.go deleted file mode 100644 index 7cbaef12..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/concurrent_server.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "context" - "fmt" - "log" - "runtime/debug" - "sync" -) - -// ConcurrentServer is the concurrent counterpart of SimpleServer -// It is able to process out-of-order requests on the same transport -type ConcurrentServer struct { - *SimpleServer -} - -// NewConcurrentServer create a new NewConcurrentServer -func NewConcurrentServer(processor Processor, serverTransport ServerTransport, options ...func(*ServerOptions)) *ConcurrentServer { - return NewConcurrentServerFactory(NewProcessorFactory(processor), serverTransport, options...) -} - -// NewConcurrentServerFactory create a new server factory -func NewConcurrentServerFactory(processorFactory ProcessorFactory, serverTransport ServerTransport, options ...func(*ServerOptions)) *ConcurrentServer { - return NewConcurrentServerFactoryContext(NewProcessorFactoryContextAdapter(processorFactory), serverTransport, options...) -} - -// NewConcurrentServerContext is a version of the ConcurrentServer that supports contexts. 
-func NewConcurrentServerContext(processor ProcessorContext, serverTransport ServerTransport, options ...func(*ServerOptions)) *ConcurrentServer { - return NewConcurrentServerFactoryContext(NewProcessorFactoryContext(processor), serverTransport, options...) -} - -// NewConcurrentServerFactoryContext is a version of the ConcurrentServerFactory that supports contexts. -func NewConcurrentServerFactoryContext(processorFactory ProcessorFactoryContext, serverTransport ServerTransport, options ...func(*ServerOptions)) *ConcurrentServer { - srv := &ConcurrentServer{ - SimpleServer: NewSimpleServerFactoryContext(processorFactory, serverTransport, options...), - } - srv.SimpleServer.configurableRequestProcessor = srv.processRequests - return srv -} - -func (p *ConcurrentServer) processRequests(ctx context.Context, client Transport) error { - processor := p.processorFactoryContext.GetProcessorContext(client) - var ( - inputTransport, outputTransport Transport - inputProtocol, outputProtocol Protocol - ) - - inputTransport = p.inputTransportFactory.GetTransport(client) - - // Special case for Header, it requires that the transport/protocol for - // input/output is the same object (to track session state). - if _, ok := inputTransport.(*HeaderTransport); ok { - outputTransport = nil - inputProtocol = p.inputProtocolFactory.GetProtocol(inputTransport) - outputProtocol = inputProtocol - } else { - outputTransport = p.outputTransportFactory.GetTransport(client) - inputProtocol = p.inputProtocolFactory.GetProtocol(inputTransport) - outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) - } - - // recover from any panic in the processor, so it doesn't crash the - // thrift server - defer func() { - if e := recover(); e != nil { - log.Printf("panic in processor: %s: %s", e, debug.Stack()) - } - }() - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - intProcessor := WrapInterceptorContext(p.interceptor, processor) - - // WARNING: This server implementation has a host of problems, and is included - // to preserve previous behavior. If you really want a production quality thrift - // server, use simple server or write your own. - // - // In the concurrent server case, we wish to handle multiple concurrent requests - // on a single transport. To do this, we re-implement the generated Process() - // function inline for greater control, then directly interact with the Read(), - // Run(), and Write() functionality. - // - // Note, for a very high performance server, it is unclear that this unbounded - // concurrency is productive for maintaining maximal throughput with good - // characteristics under load. 
- var writeLock sync.Mutex - for { - name, _, seqID, err := inputProtocol.ReadMessageBegin() - if err != nil { - if err, ok := err.(TransportException); ok && err.TypeID() == END_OF_FILE { - // connection terminated because client closed connection - break - } - return err - } - pfunc, err := intProcessor.GetProcessorFunctionContext(name) - if pfunc == nil || err != nil { - if err == nil { - err = fmt.Errorf("no such function: %q", name) - } - inputProtocol.Skip(STRUCT) - inputProtocol.ReadMessageEnd() - exc := NewApplicationException(UNKNOWN_METHOD, err.Error()) - - // protect message writing - writeLock.Lock() - defer writeLock.Unlock() - - outputProtocol.WriteMessageBegin(name, EXCEPTION, seqID) - exc.Write(outputProtocol) - outputProtocol.WriteMessageEnd() - outputProtocol.Flush() - return exc - } - argStruct, err := pfunc.Read(inputProtocol) - if err != nil { - return err - } - go func() { - var result WritableStruct - result, err = pfunc.RunContext(ctx, argStruct) - // protect message writing - writeLock.Lock() - defer writeLock.Unlock() - if err != nil && result == nil { - // if the Run function generates an error, synthesize an application - // error - exc := NewApplicationException(INTERNAL_ERROR, "Internal error: "+err.Error()) - err, result = exc, exc - } - pfunc.Write(seqID, result, outputProtocol) - // ignore write failures explicitly. This emulates previous behavior - // we hope that the read will fail and the connection will be closed - // well. - }() - } - // graceful exit - return nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/context.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/context.go deleted file mode 100644 index 1a486c92..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/context.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "context" - "crypto/tls" - "net" -) - -type contextKey int - -const ( - connInfoKey contextKey = iota -) - -// ConnInfo contains connection information from clients of the SimpleServer. -type ConnInfo struct { - LocalAddr net.Addr - RemoteAddr net.Addr - - netConn net.Conn // set by thrift tcp servers - tlsState *tls.ConnectionState // set by thrift http servers -} - -// String implements the fmt.Stringer interface. -func (c ConnInfo) String() string { - return c.RemoteAddr.String() + " -> " + c.LocalAddr.String() -} - -// tlsConnectionStater is an abstract interface for types that can return -// the state of TLS connections. This is used to support not only tls.Conn -// but also custom wrappers such as permissive TLS/non-TLS sockets. 
-// -// Caveat: this interface has to support at least tls.Conn, which has -// the current signature for ConnectionState. Because of that, wrappers -// for permissive TLS/non-TLS may return an empty tls.ConnectionState. -type tlsConnectionStater interface { - ConnectionState() tls.ConnectionState -} - -// TLS returns the TLS connection state. -func (c ConnInfo) TLS() *tls.ConnectionState { - if c.tlsState != nil { - return c.tlsState - } - tlsConn, ok := c.netConn.(tlsConnectionStater) - if !ok { - return nil - } - cs := tlsConn.ConnectionState() - // See the caveat in tlsConnectionStater. - if cs.Version == 0 { - return nil - } - return &cs -} - -// ConnInfoFromContext extracts and returns ConnInfo from context. -func ConnInfoFromContext(ctx context.Context) (ConnInfo, bool) { - v, ok := ctx.Value(connInfoKey).(ConnInfo) - return v, ok -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/debug_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/debug_protocol.go deleted file mode 100644 index 1155c291..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/debug_protocol.go +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -import ( - "log" -) - -type DebugProtocol struct { - Delegate Protocol - LogPrefix string -} - -type DebugProtocolFactory struct { - Underlying ProtocolFactory - LogPrefix string -} - -func NewDebugProtocolFactory(underlying ProtocolFactory, logPrefix string) *DebugProtocolFactory { - return &DebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - } -} - -func (t *DebugProtocolFactory) GetProtocol(trans Transport) Protocol { - return &DebugProtocol{ - Delegate: t.Underlying.GetProtocol(trans), - LogPrefix: t.LogPrefix, - } -} - -func (tdp *DebugProtocol) WriteMessageBegin(name string, typeId MessageType, seqid int32) error { - err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid) - log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) - return err -} -func (tdp *DebugProtocol) WriteMessageEnd() error { - err := tdp.Delegate.WriteMessageEnd() - log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteStructBegin(name string) error { - err := tdp.Delegate.WriteStructBegin(name) - log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) - return err -} -func (tdp *DebugProtocol) WriteStructEnd() error { - err := tdp.Delegate.WriteStructEnd() - log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteFieldBegin(name string, typeId Type, id int16) error { - err := tdp.Delegate.WriteFieldBegin(name, typeId, id) - log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) - return err -} -func (tdp *DebugProtocol) WriteFieldEnd() error { - err := tdp.Delegate.WriteFieldEnd() - log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteFieldStop() error { - err := tdp.Delegate.WriteFieldStop() - log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteMapBegin(keyType Type, valueType Type, size int) error { - err := tdp.Delegate.WriteMapBegin(keyType, valueType, size) - log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) - return err -} -func (tdp *DebugProtocol) WriteMapEnd() error { - err := tdp.Delegate.WriteMapEnd() - log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteListBegin(elemType Type, size int) error { - err := tdp.Delegate.WriteListBegin(elemType, size) - log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - return err -} -func (tdp *DebugProtocol) WriteListEnd() error { - err := tdp.Delegate.WriteListEnd() - log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteSetBegin(elemType Type, size int) error { - err := tdp.Delegate.WriteSetBegin(elemType, size) - log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - return err -} -func (tdp *DebugProtocol) WriteSetEnd() error { - err := tdp.Delegate.WriteSetEnd() - log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *DebugProtocol) WriteBool(value bool) error { - err := tdp.Delegate.WriteBool(value) - log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteByte(value byte) error { - err := tdp.Delegate.WriteByte(value) 
- log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteI16(value int16) error { - err := tdp.Delegate.WriteI16(value) - log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteI32(value int32) error { - err := tdp.Delegate.WriteI32(value) - log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteI64(value int64) error { - err := tdp.Delegate.WriteI64(value) - log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteDouble(value float64) error { - err := tdp.Delegate.WriteDouble(value) - log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteFloat(value float32) error { - err := tdp.Delegate.WriteFloat(value) - log.Printf("%sWriteFloat(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteString(value string) error { - err := tdp.Delegate.WriteString(value) - log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *DebugProtocol) WriteBinary(value []byte) error { - err := tdp.Delegate.WriteBinary(value) - log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} - -func (tdp *DebugProtocol) ReadMessageBegin() (name string, typeId MessageType, seqid int32, err error) { - name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin() - log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) - return -} -func (tdp *DebugProtocol) ReadMessageEnd() (err error) { - err = tdp.Delegate.ReadMessageEnd() - log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *DebugProtocol) ReadStructBegin() (name string, err error) { - name, err = tdp.Delegate.ReadStructBegin() - log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) - return -} -func (tdp *DebugProtocol) ReadStructEnd() (err error) { - err = tdp.Delegate.ReadStructEnd() - log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *DebugProtocol) ReadFieldBegin() (name string, typeId Type, id int16, err error) { - name, typeId, id, err = tdp.Delegate.ReadFieldBegin() - log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) - return -} -func (tdp *DebugProtocol) ReadFieldEnd() (err error) { - err = tdp.Delegate.ReadFieldEnd() - log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *DebugProtocol) ReadMapBegin() (keyType Type, valueType Type, size int, err error) { - keyType, valueType, size, err = tdp.Delegate.ReadMapBegin() - log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) - return -} -func (tdp *DebugProtocol) ReadMapEnd() (err error) { - err = tdp.Delegate.ReadMapEnd() - log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *DebugProtocol) ReadListBegin() (elemType Type, size int, err error) { - elemType, size, err = tdp.Delegate.ReadListBegin() - log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *DebugProtocol) ReadListEnd() (err error) { - err = tdp.Delegate.ReadListEnd() - log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, 
err) - return -} -func (tdp *DebugProtocol) ReadSetBegin() (elemType Type, size int, err error) { - elemType, size, err = tdp.Delegate.ReadSetBegin() - log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *DebugProtocol) ReadSetEnd() (err error) { - err = tdp.Delegate.ReadSetEnd() - log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *DebugProtocol) ReadBool() (value bool, err error) { - value, err = tdp.Delegate.ReadBool() - log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadByte() (value byte, err error) { - value, err = tdp.Delegate.ReadByte() - log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadI16() (value int16, err error) { - value, err = tdp.Delegate.ReadI16() - log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadI32() (value int32, err error) { - value, err = tdp.Delegate.ReadI32() - log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadI64() (value int64, err error) { - value, err = tdp.Delegate.ReadI64() - log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadDouble() (value float64, err error) { - value, err = tdp.Delegate.ReadDouble() - log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadFloat() (value float32, err error) { - value, err = tdp.Delegate.ReadFloat() - log.Printf("%sReadFloat() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadString() (value string, err error) { - value, err = tdp.Delegate.ReadString() - log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) ReadBinary() (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary() - log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *DebugProtocol) Skip(fieldType Type) (err error) { - err = tdp.Delegate.Skip(fieldType) - log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - return -} -func (tdp *DebugProtocol) Flush() (err error) { - err = tdp.Delegate.Flush() - log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - return -} - -func (tdp *DebugProtocol) Transport() Transport { - return tdp.Delegate.Transport() -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/deserializer.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/deserializer.go deleted file mode 100644 index f301d186..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -type Deserializer struct { - Transport Transport - Protocol Protocol -} - -func NewDeserializer() *Deserializer { - var transport Transport - transport = NewMemoryBufferLen(1024) - - protocol := NewBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &Deserializer{ - transport, - protocol} -} - -func (t *Deserializer) ReadString(msg Struct, s string) (err error) { - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} - -func (t *Deserializer) Read(msg Struct, b []byte) (err error) { - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/exception.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/exception.go deleted file mode 100644 index 07fe0279..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/exception.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "errors" -) - -// Exception is a generic thrift exception -type Exception interface { - error -} - -// PrependError prepends additional information to an error without losing the thrift exception interface -func PrependError(prepend string, err error) error { - if t, ok := err.(TransportException); ok { - return NewTransportException(t.TypeID(), prepend+t.Error()) - } - if t, ok := err.(ProtocolException); ok { - return NewProtocolExceptionWithType(t.TypeID(), errors.New(prepend+err.Error())) - } - if t, ok := err.(ApplicationException); ok { - return NewApplicationException(t.TypeID(), prepend+t.Error()) - } - - return errors.New(prepend + err.Error()) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/field.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/field.go deleted file mode 100644 index b77f0c5f..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/field.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -// Helper class that encapsulates field metadata. -type field struct { - name string - typeId Type - id int -} - -func newField(n string, t Type, i int) *field { - return &field{name: n, typeId: t, id: i} -} - -func (p *field) Name() string { - if p == nil { - return "" - } - return p.name -} - -func (p *field) TypeId() Type { - if p == nil { - return Type(VOID) - } - return p.typeId -} - -func (p *field) Id() int { - if p == nil { - return -1 - } - return p.id -} - -func (p *field) String() string { - if p == nil { - return "" - } - return "" -} - -var ANONYMOUS_FIELD *field - -type fieldSlice []field - -func (p fieldSlice) Len() int { - return len(p) -} - -func (p fieldSlice) Less(i, j int) bool { - return p[i].Id() < p[j].Id() -} - -func (p fieldSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func init() { - ANONYMOUS_FIELD = newField("", STOP, 0) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/framed_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/framed_transport.go deleted file mode 100644 index cc46358c..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "io" -) - -const DEFAULT_MAX_LENGTH = 16384000 - -type FramedTransport struct { - transport Transport - buf bytes.Buffer - reader *bufio.Reader - frameSize uint32 //Current remaining size of the frame. 
if ==0 read next frame header - buffer [4]byte - maxLength uint32 -} - -type framedTransportFactory struct { - factory TransportFactory - maxLength uint32 -} - -func NewFramedTransportFactory(factory TransportFactory) TransportFactory { - return &framedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} -} - -func NewFramedTransportFactoryMaxLength(factory TransportFactory, maxLength uint32) TransportFactory { - return &framedTransportFactory{factory: factory, maxLength: maxLength} -} - -func (p *framedTransportFactory) GetTransport(base Transport) Transport { - return NewFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength) -} - -func NewFramedTransport(transport Transport) *FramedTransport { - return &FramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} -} - -func NewFramedTransportMaxLength(transport Transport, maxLength uint32) *FramedTransport { - return &FramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength} -} - -func (p *FramedTransport) Open() error { - return p.transport.Open() -} - -func (p *FramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *FramedTransport) Close() error { - return p.transport.Close() -} - -func (p *FramedTransport) Read(buf []byte) (l int, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < uint32(len(buf)) { - frameSize := p.frameSize - tmp := make([]byte, p.frameSize) - l, err = p.Read(tmp) - copy(buf, tmp) - if err == nil { - err = NewTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf))) - return - } - } - got, err := p.reader.Read(buf) - p.frameSize = p.frameSize - uint32(got) - //sanity check - if p.frameSize < 0 { - return 0, NewTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size") - } - return got, NewTransportExceptionFromError(err) -} - -func (p *FramedTransport) ReadByte() (c byte, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < 1 { - return 0, NewTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1)) - } - c, err = p.reader.ReadByte() - if err == nil { - p.frameSize-- - } - return -} - -func (p *FramedTransport) Write(buf []byte) (int, error) { - n, err := p.buf.Write(buf) - return n, NewTransportExceptionFromError(err) -} - -func (p *FramedTransport) WriteByte(c byte) error { - return p.buf.WriteByte(c) -} - -func (p *FramedTransport) WriteString(s string) (n int, err error) { - return p.buf.WriteString(s) -} - -func (p *FramedTransport) Flush() error { - size := p.buf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - return NewTransportExceptionFromError(err) - } - if size > 0 { - if n, err := p.buf.WriteTo(p.transport); err != nil { - print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n") - return NewTransportExceptionFromError(err) - } - } - err = p.transport.Flush() - return NewTransportExceptionFromError(err) -} - -func (p *FramedTransport) readFrameHeader() (uint32, error) { - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return 0, err - } - size := binary.BigEndian.Uint32(buf) - if size < 0 || size > p.maxLength { - return 0, 
NewTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - return size, nil -} - -func (p *FramedTransport) RemainingBytes() (num_bytes uint64) { - return uint64(p.frameSize) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header.go deleted file mode 100644 index d7fa10d0..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header.go +++ /dev/null @@ -1,698 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "bufio" - "bytes" - "compress/zlib" - "encoding/binary" - "fmt" - "io" -) - -// Header keys -const ( - IdentityHeader string = "identity" - IDVersionHeader string = "id_version" - IDVersion string = "1" - PriorityHeader string = "thrift_priority" - ClientTimeoutHeader string = "client_timeout" - QueueTimeoutHeader string = "queue_timeout" - // Header Magicks - // 0 and 16th bits must be 0 to differentiate from framed & unframed - HeaderMagic uint32 = 0x0FFF0000 - HeaderMask uint32 = 0xFFFF0000 - FlagsMask uint32 = 0x0000FFFF - HTTPServerMagic uint32 = 0x504F5354 // POST - HTTPClientMagic uint32 = 0x48545450 // HTTP - HTTPGetClientMagic uint32 = 0x47455420 // GET - HTTPHeadClientMagic uint32 = 0x48454144 // HEAD - BigFrameMagic uint32 = 0x42494746 // BIGF - MaxFrameSize uint32 = 0x3FFFFFFF - CommonHeaderSize uint64 = 10 - MaxHeaderSize uint32 = 131071 -) - -type ClientType int64 - -const ( - HeaderClientType ClientType = iota - FramedDeprecated - UnframedDeprecated - HTTPServerType - HTTPClientType - FramedCompact - HTTPGetClientType - UnknownClientType - UnframedCompactDeprecated -) - -func (c ClientType) String() string { - switch c { - case HeaderClientType: - return "Header" - case FramedDeprecated: - return "FramedDeprecated" - case UnframedDeprecated: - return "UnframedDeprecated" - case HTTPServerType: - return "HTTPServer" - case HTTPClientType: - return "HTTPClient" - case FramedCompact: - return "FramedCompact" - case HTTPGetClientType: - return "HTTPGet" - case UnframedCompactDeprecated: - return "UnframedCompactDeprecated" - case UnknownClientType: - fallthrough - default: - return "Unknown" - } -} - -type HeaderFlags uint32 - -const ( - HeaderFlagSupportOutOfOrder HeaderFlags = 0x01 - HeaderFlagDuplexReverse HeaderFlags = 0x08 -) - -type InfoIDType uint32 - -const ( - InfoIDPadding InfoIDType = 0 - InfoIDKeyValue InfoIDType = 1 - InfoIDPKeyValue InfoIDType = 2 -) - -// TransformID Numerical ID of transform function -type TransformID uint32 - -const ( - // TransformNone Default null transform - TransformNone TransformID = 0 - // TransformZlib Apply zlib compression - TransformZlib 
TransformID = 1 - // TransformHMAC Deprecated and no longer supported - TransformHMAC TransformID = 2 - // TransformSnappy Apply snappy compression - TransformSnappy TransformID = 3 - // TransformQLZ Deprecated and no longer supported - TransformQLZ TransformID = 4 - // TransformZstd Apply zstd compression - TransformZstd TransformID = 5 -) - -func (c TransformID) String() string { - switch c { - case TransformNone: - return "none" - case TransformZlib: - return "zlib" - case TransformHMAC: - return "hmac" - case TransformSnappy: - return "snappy" - case TransformQLZ: - return "qlz" - case TransformZstd: - return "zstd" - default: - return "unknown" - } -} - -var supportedTransforms = map[TransformID]bool{ - TransformNone: true, - TransformZlib: true, - TransformHMAC: false, - TransformSnappy: false, - TransformQLZ: false, - TransformZstd: zstdTransformSupport, -} - -// Untransformer will find a transform function to wrap a reader with to transformed the data. -func (c TransformID) Untransformer() (func(byteReader) (byteReader, error), error) { - switch c { - case TransformNone: - return func(rd byteReader) (byteReader, error) { - return rd, nil - }, nil - case TransformZlib: - return func(rd byteReader) (byteReader, error) { - zlrd, err := zlib.NewReader(rd) - if err != nil { - return nil, err - } - return ensureByteReader(zlrd), nil - }, nil - case TransformZstd: - return zstdRead, nil - default: - return nil, NewProtocolExceptionWithType( - NOT_IMPLEMENTED, fmt.Errorf("Header transform %s not supported", c.String()), - ) - } -} - -type tHeader struct { - length uint64 - flags uint16 - seq uint32 - headerLen uint16 - payloadLen uint64 - - protoID ProtocolID - transforms []TransformID - - // Map to use for headers - headers map[string]string - pHeaders map[string]string - - // clientType Negotiated client type - clientType ClientType -} - -// byteReader Combined interface to expose original ReadByte calls -type byteReader interface { - io.Reader - io.ByteReader -} - -// ensureByteReader If a reader does not implement ReadByte, wrap it with a -// buffer that can. Needed for most thrift interfaces. 
-func ensureByteReader(rd io.Reader) byteReader { - if brr, ok := rd.(byteReader); ok { - return brr - } - return bufio.NewReader(rd) -} - -// limitedByteReader Keep the ByteReader interface when wrapping with a limit -type limitedByteReader struct { - io.LimitedReader - // Copy of the original interface given to us that implemented ByteReader - orig byteReader -} - -func newLimitedByteReader(rd byteReader, n int64) *limitedByteReader { - return &limitedByteReader{ - LimitedReader: io.LimitedReader{R: rd, N: n}, orig: rd, - } -} - -func (r *limitedByteReader) ReadByte() (byte, error) { - if r.N <= 0 { - return '0', io.EOF - } - b, err := r.orig.ReadByte() - r.N-- - return b, err -} - -func readVarString(buf byteReader) (string, error) { - strlen, err := binary.ReadUvarint(buf) - if err != nil { - return "", fmt.Errorf("tHeader: error reading len of kv string: %s", err.Error()) - } - - strbuf := make([]byte, strlen) - _, err = io.ReadFull(buf, strbuf) - if err != nil { - return "", fmt.Errorf("tHeader: error reading kv string: %s", err.Error()) - } - return string(strbuf), nil -} - -// readHeaderMaps Consume a set of key/value pairs from the buffer -func readInfoHeaderSet(buf byteReader) (map[string]string, error) { - headers := map[string]string{} - numkvs, err := binary.ReadUvarint(buf) - if err != nil { - return nil, fmt.Errorf("tHeader: error reading number of keyvalues: %s", err.Error()) - } - - for i := uint64(0); i < numkvs; i++ { - key, err := readVarString(buf) - if err != nil { - return nil, fmt.Errorf("tHeader: error reading keyvalue key: %s", err.Error()) - } - val, err := readVarString(buf) - if err != nil { - return nil, fmt.Errorf("tHeader: error reading keyvalue val: %s", err.Error()) - } - headers[key] = val - } - return headers, nil -} - -// readTransforms Consume a size delimited transform set from the buffer -// If the there is an unknown or unsupported transform we will bail out. 
-func readTransforms(buf byteReader) ([]TransformID, error) { - transforms := []TransformID{} - - numtransforms, err := binary.ReadUvarint(buf) - if err != nil { - return nil, NewTransportExceptionFromError( - fmt.Errorf("tHeader: error reading number of transforms: %s", err.Error()), - ) - } - - // Read transforms - for i := uint64(0); i < numtransforms; i++ { - transformID, err := binary.ReadUvarint(buf) - if err != nil { - return nil, NewTransportExceptionFromError( - fmt.Errorf("tHeader: error reading transforms: %s", err.Error()), - ) - } - tid := TransformID(transformID) - if supported, ok := supportedTransforms[tid]; ok { - if supported { - transforms = append(transforms, tid) - } else { - return nil, NewTransportExceptionFromError( - fmt.Errorf("tHeader: unsupported transform: %s", tid.String()), - ) - } - } else { - return nil, NewTransportExceptionFromError( - fmt.Errorf("tHeader: unknown transform ID: %#x", tid), - ) - } - } - return transforms, nil -} - -// readInfoHeaders Read the K/V headers at the end of the header -// This will keep consuming bytes until the buffer returns EOF -func readInfoHeaders(buf byteReader) (map[string]string, map[string]string, error) { - // var err error - infoheaders := map[string]string{} - infopHeaders := map[string]string{} - - for { - infoID, err := binary.ReadUvarint(buf) - - // this is the last field, read until there is no more padding - if err == io.EOF { - break - } - - if err != nil { - return nil, nil, NewTransportExceptionFromError( - fmt.Errorf("tHeader: error reading infoID: %s", err.Error()), - ) - } - - switch InfoIDType(infoID) { - case InfoIDPadding: - continue - case InfoIDKeyValue: - hdrs, err := readInfoHeaderSet(buf) - if err != nil { - return nil, nil, err - } - for k, v := range hdrs { - infoheaders[k] = v - } - case InfoIDPKeyValue: - hdrs, err := readInfoHeaderSet(buf) - if err != nil { - return nil, nil, err - } - for k, v := range hdrs { - infopHeaders[k] = v - } - default: - return nil, nil, NewTransportExceptionFromError( - fmt.Errorf("tHeader: error reading infoIDType: %#x", infoID), - ) - } - } - return infoheaders, infopHeaders, nil -} - -// readVarHeader Read the variable-length trailing header -func (hdr *tHeader) readVarHeader(buf byteReader) error { - // Read protocol ID - protoID, err := binary.ReadUvarint(buf) - if err != nil { - return NewTransportExceptionFromError( - fmt.Errorf("tHeader: error reading protocol ID: %s", err.Error()), - ) - } - hdr.protoID = ProtocolID(protoID) - hdr.transforms, err = readTransforms(buf) - if err != nil { - return err - } - - hdr.headers, hdr.pHeaders, err = readInfoHeaders(buf) - if err != nil { - return err - } - - return nil -} - -// isCompactFramed Check if the magic value corresponds to compact proto -func isCompactFramed(magic uint32) bool { - protocolID := int8(magic >> 24) - protocolVersion := int8((magic >> 16) & uint32(COMPACT_VERSION_MASK)) - return uint8(protocolID) == uint8(COMPACT_PROTOCOL_ID) && (protocolVersion == int8(COMPACT_VERSION) || - protocolVersion == int8(COMPACT_VERSION_BE)) -} - -// analyzeFirst32Bit Guess client type from the first 4 bytes -func analyzeFirst32Bit(word uint32) ClientType { - if (word & BinaryVersionMask) == BinaryVersion1 { - return UnframedDeprecated - } else if isCompactFramed(word) { - return UnframedCompactDeprecated - } else if word == HTTPServerMagic || - word == HTTPGetClientMagic || - word == HTTPHeadClientMagic { - return HTTPServerType - } else if word == HTTPClientMagic { - return HTTPClientType - } - return 
UnknownClientType -} - -// analyzeSecond32Bit Find the header client type from the 4-8th bytes of header -func analyzeSecond32Bit(word uint32) ClientType { - if (word & BinaryVersionMask) == BinaryVersion1 { - return FramedDeprecated - } - if isCompactFramed(word) { - return FramedCompact - } - if (word & HeaderMask) == HeaderMagic { - return HeaderClientType - } - return UnknownClientType -} - -// checkFramed If the client type is framed, set appropriate protocolID in -// the header. Otherwise, return an unknown transport error. -func checkFramed(hdr *tHeader, clientType ClientType) error { - switch clientType { - case FramedDeprecated: - hdr.protoID = ProtocolIDBinary - hdr.clientType = clientType - hdr.payloadLen = hdr.length - return nil - case FramedCompact: - hdr.protoID = ProtocolIDCompact - hdr.clientType = clientType - hdr.payloadLen = hdr.length - return nil - default: - return NewProtocolExceptionWithType( - NOT_IMPLEMENTED, fmt.Errorf("Transport %s not supported on tHeader", clientType), - ) - } -} - -// readHeaderInfo Consume header information from the buffer -func (hdr *tHeader) Read(buf *bufio.Reader) error { - var ( - err error - firstword uint32 - secondword uint32 - wordbuf []byte - ) - - if wordbuf, err = buf.Peek(4); err != nil { - return NewTransportExceptionFromError(err) - } - firstword = binary.BigEndian.Uint32(wordbuf) - - // Check the first word if it matches http/unframed signatures - // We don't support non-framed protocols, so bail out - switch clientType := analyzeFirst32Bit(firstword); clientType { - case UnknownClientType: - break - default: - return NewTransportExceptionFromError( - fmt.Errorf("Transport %s not supported on tHeader (word=%#x)", clientType, firstword), - ) - } - - // From here on out, all protocols supported are frame-based. First word is length. - hdr.length = uint64(firstword) - if firstword > MaxFrameSize { - return NewTransportExceptionFromError( - fmt.Errorf("BigFrames not supported: got size %d", firstword), - ) - } - - // First word is always length, discard. - _, err = buf.Discard(4) - if err != nil { - // Shouldn't be possible to fail here, but check anyways - return NewTransportExceptionFromError(err) - } - - // Only peek here. If it was framed transport, we are now reading payload. - if wordbuf, err = buf.Peek(4); err != nil { - return NewTransportExceptionFromError(err) - } - secondword = binary.BigEndian.Uint32(wordbuf) - - // Check if we can detect a framed proto, and bail out if we do. - if clientType := analyzeSecond32Bit(secondword); clientType != HeaderClientType { - return checkFramed(hdr, clientType) - } - - // It was not framed proto, assume header and discard that word. 
- _, err = buf.Discard(4) - if err != nil { - // Shouldn't be possible to fail here, but check anyways - return NewTransportExceptionFromError(err) - } - - // Assume header protocol from here on in, parse rest of header - hdr.flags = uint16(secondword & FlagsMask) - err = binary.Read(buf, binary.BigEndian, &hdr.seq) - if err != nil { - return NewTransportExceptionFromError(err) - } - - err = binary.Read(buf, binary.BigEndian, &hdr.headerLen) - if err != nil { - return NewTransportExceptionFromError(err) - } - - if uint32(hdr.headerLen*4) > MaxHeaderSize { - return NewTransportExceptionFromError( - fmt.Errorf("invalid header length: %d", int64(hdr.headerLen*4)), - ) - } - - // The length of the payload without the header (fixed is 10) - hdr.payloadLen = hdr.length - 10 - uint64(hdr.headerLen*4) - - // Limit the reader for the header so we can't overrun - limbuf := newLimitedByteReader(buf, int64(hdr.headerLen*4)) - hdr.clientType = HeaderClientType - return hdr.readVarHeader(limbuf) -} - -func writeTransforms(transforms []TransformID, buf io.Writer) (int, error) { - size := 0 - n, err := writeUvarint(uint64(len(transforms)), buf) - size += n - if err != nil { - return size, err - } - - if transforms == nil { - return size, nil - } - - for _, trans := range transforms { - // FIXME: We should only write supported xforms - n, err = writeUvarint(uint64(trans), buf) - size += n - if err != nil { - return size, err - } - } - return size, nil -} - -func writeUvarint(v uint64, buf io.Writer) (int, error) { - var b [10]byte - n := binary.PutUvarint(b[:], v) - return buf.Write(b[:n]) -} - -func writeVarString(s string, buf io.Writer) (int, error) { - n, err := writeUvarint(uint64(len(s)), buf) - if err != nil { - return n, err - } - n2, err := buf.Write([]byte(s)) - return n + n2, err -} - -func writeInfoHeaders(headers map[string]string, infoidtype InfoIDType, buf io.Writer) (int, error) { - cnt := len(headers) - size := 0 - if cnt < 1 { - return 0, nil - } - - n, err := writeUvarint(uint64(infoidtype), buf) - size += n - if err != nil { - return 0, err - } - - n, err = writeUvarint(uint64(cnt), buf) - size += n - if err != nil { - return 0, err - } - - for k, v := range headers { - n, err = writeVarString(k, buf) - size += n - if err != nil { - return 0, err - } - - n, err = writeVarString(v, buf) - size += n - if err != nil { - return 0, err - } - } - - return size, nil -} - -func (hdr *tHeader) writeVarHeader(buf io.Writer) (int, error) { - size := 0 - n, err := writeUvarint(uint64(hdr.protoID), buf) - size += n - if err != nil { - return size, err - } - - n, err = writeTransforms(hdr.transforms, buf) - size += n - if err != nil { - return size, err - } - - n, err = writeInfoHeaders(hdr.pHeaders, InfoIDPKeyValue, buf) - size += n - if err != nil { - return size, err - } - - n, err = writeInfoHeaders(hdr.headers, InfoIDKeyValue, buf) - size += n - if err != nil { - return size, err - } - - padding := (4 - size%4) % 4 - for i := 0; i < padding; i++ { - buf.Write([]byte{byte(0)}) - size++ - } - - return size, err -} - -func (hdr *tHeader) calcLenFromPayload() error { - fixedlen := uint64(0) - switch hdr.clientType { - case FramedCompact: - hdr.length = hdr.payloadLen - return nil - case FramedDeprecated: - hdr.length = hdr.payloadLen - return nil - case HeaderClientType: - // TODO: Changes with bigframes - fixedlen = 10 - default: - return NewApplicationException( - UNKNOWN_TRANSPORT_EXCEPTION, - fmt.Sprintf("cannot get length of non-framed transport %s", hdr.clientType.String()), - ) - } - 
framesize := uint64(hdr.payloadLen + fixedlen + uint64(hdr.headerLen)*4) - // FIXME: support bigframes - if framesize > uint64(MaxFrameSize) { - return NewTransportException( - INVALID_FRAME_SIZE, - fmt.Sprintf("cannot send bigframe of size %d", framesize), - ) - } - hdr.length = framesize - return nil -} - -// Write Write out the header, requires payloadLen be set. -func (hdr *tHeader) Write(buf io.Writer) error { - // Make a reasonably sized temp buffer for the variable header - hdrbuf := bytes.NewBuffer(nil) - _, err := hdr.writeVarHeader(hdrbuf) - if err != nil { - return err - } - - if (hdrbuf.Len() % 4) > 0 { - return NewTransportException( - INVALID_FRAME_SIZE, fmt.Sprintf("unable to write header of size %d (must be multiple of 4)", hdr.headerLen), - ) - } - if hdrbuf.Len() > int(MaxHeaderSize) { - return NewApplicationException( - INVALID_FRAME_SIZE, fmt.Sprintf("unable to write header of size %d (max is %d)", hdrbuf.Len(), MaxHeaderSize), - ) - } - hdr.headerLen = uint16(hdrbuf.Len() / 4) - - err = hdr.calcLenFromPayload() - if err != nil { - return err - } - - // FIXME: Bad assumption (no err check), but we should be writing to an in-memory buffer here - binary.Write(buf, binary.BigEndian, uint32(hdr.length)) - binary.Write(buf, binary.BigEndian, uint16(HeaderMagic>>16)) - binary.Write(buf, binary.BigEndian, hdr.flags) - binary.Write(buf, binary.BigEndian, hdr.seq) - binary.Write(buf, binary.BigEndian, hdr.headerLen) - hdrbuf.WriteTo(buf) - - return nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_protocol.go deleted file mode 100644 index 07a6af5d..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_protocol.go +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "fmt" -) - -type HeaderProtocol struct { - Protocol - origTransport Transport - trans *HeaderTransport - - protoID ProtocolID -} - -type HeaderProtocolFactory struct{} - -func NewHeaderProtocolFactory() *HeaderProtocolFactory { - return &HeaderProtocolFactory{} -} - -func (p *HeaderProtocolFactory) GetProtocol(trans Transport) Protocol { - return NewHeaderProtocol(trans) -} - -func NewHeaderProtocol(trans Transport) *HeaderProtocol { - p := &HeaderProtocol{ - origTransport: trans, - protoID: ProtocolIDCompact, - } - if et, ok := trans.(*HeaderTransport); ok { - p.trans = et - } else { - p.trans = NewHeaderTransport(trans) - } - - // Effectively an invariant violation. 
- if err := p.ResetProtocol(); err != nil { - panic(err) - } - return p -} - -func (p *HeaderProtocol) ResetProtocol() error { - if p.Protocol != nil && p.protoID == p.trans.ProtocolID() { - return nil - } - - p.protoID = p.trans.ProtocolID() - switch p.protoID { - case ProtocolIDBinary: - // These defaults match cpp implementation - p.Protocol = NewBinaryProtocol(p.trans, false, true) - case ProtocolIDCompact: - p.Protocol = NewCompactProtocol(p.trans) - default: - return NewProtocolException(fmt.Errorf("Unknown protocol id: %#x", p.protoID)) - } - return nil -} - -// -// Writing methods. -// - -func (p *HeaderProtocol) WriteMessageBegin(name string, typeId MessageType, seqid int32) error { - p.ResetProtocol() - // FIXME: Python is doing this -- don't know if it's correct. - // Should we be using this seqid or the header's? - if typeId == CALL || typeId == ONEWAY { - p.trans.SetSeqID(uint32(seqid)) - } - return p.Protocol.WriteMessageBegin(name, typeId, seqid) -} - -// -// Reading methods. -// - -func (p *HeaderProtocol) ReadMessageBegin() (name string, typeId MessageType, seqid int32, err error) { - if typeId == INVALID_MESSAGE_TYPE { - if err = p.trans.ResetProtocol(); err != nil { - return name, EXCEPTION, seqid, err - } - } - - err = p.ResetProtocol() - if err != nil { - return name, EXCEPTION, seqid, err - } - - return p.Protocol.ReadMessageBegin() -} - -func (p *HeaderProtocol) Flush() (err error) { - return NewProtocolException(p.trans.Flush()) -} - -func (p *HeaderProtocol) Skip(fieldType Type) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *HeaderProtocol) Transport() Transport { - return p.origTransport -} - -func (p *HeaderProtocol) HeaderTransport() Transport { - return p.trans -} - -// Control underlying header transport - -func (p *HeaderProtocol) SetIdentity(identity string) { - p.trans.SetIdentity(identity) -} - -func (p *HeaderProtocol) Identity() string { - return p.trans.Identity() -} - -func (p *HeaderProtocol) PeerIdentity() string { - return p.trans.PeerIdentity() -} - -func (p *HeaderProtocol) SetPersistentHeader(key, value string) { - p.trans.SetPersistentHeader(key, value) -} - -func (p *HeaderProtocol) PersistentHeader(key string) (string, bool) { - return p.trans.PersistentHeader(key) -} - -func (p *HeaderProtocol) PersistentHeaders() map[string]string { - return p.trans.PersistentHeaders() -} - -func (p *HeaderProtocol) ClearPersistentHeaders() { - p.trans.ClearPersistentHeaders() -} - -func (p *HeaderProtocol) SetHeader(key, value string) { - p.trans.SetHeader(key, value) -} - -func (p *HeaderProtocol) Header(key string) (string, bool) { - return p.trans.Header(key) -} - -func (p *HeaderProtocol) Headers() map[string]string { - return p.trans.Headers() -} - -func (p *HeaderProtocol) ClearHeaders() { - p.trans.ClearHeaders() -} - -func (p *HeaderProtocol) ReadHeader(key string) (string, bool) { - return p.trans.ReadHeader(key) -} - -func (p *HeaderProtocol) ReadHeaders() map[string]string { - return p.trans.ReadHeaders() -} - -func (p *HeaderProtocol) ProtocolID() ProtocolID { - return p.protoID -} - -func (p *HeaderProtocol) AddTransform(trans TransformID) error { - return p.trans.AddTransform(trans) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_transport.go deleted file mode 100644 index c72e0857..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/header_transport.go +++ /dev/null @@ -1,460 +0,0 @@ -/* - * 
Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "bufio" - "bytes" - "compress/zlib" - "encoding/binary" - "fmt" - "io/ioutil" -) - -const ( - DefaulprotoID = ProtocolIDCompact - DefaultClientType = HeaderClientType -) - -type tHeaderTransportFactory struct { - factory TransportFactory -} - -func NewHeaderTransportFactory(factory TransportFactory) TransportFactory { - return &tHeaderTransportFactory{factory: factory} -} - -func (p *tHeaderTransportFactory) GetTransport(base Transport) Transport { - return NewHeaderTransport(base) -} - -type HeaderTransport struct { - transport Transport - - // Used on read - rbuf *bufio.Reader - framebuf byteReader - readHeader *tHeader - // remaining bytes in the current frame. If 0, read in a new frame. - frameSize uint64 - - // Used on write - wbuf *bytes.Buffer - identity string - writeInfoHeaders map[string]string - persistentWriteInfoHeaders map[string]string - - // Negotiated - protoID ProtocolID - seqID uint32 - flags uint16 - clientType ClientType - writeTransforms []TransformID -} - -// NewHeaderTransport Create a new transport with defaults. 
-func NewHeaderTransport(transport Transport) *HeaderTransport { - return &HeaderTransport{ - transport: transport, - rbuf: bufio.NewReader(transport), - framebuf: newLimitedByteReader(bytes.NewReader(nil), 0), - frameSize: 0, - - wbuf: bytes.NewBuffer(nil), - writeInfoHeaders: map[string]string{}, - persistentWriteInfoHeaders: map[string]string{}, - - protoID: DefaulprotoID, - flags: 0, - clientType: DefaultClientType, - writeTransforms: []TransformID{}, - } -} - -func (t *HeaderTransport) SetSeqID(seq uint32) { - t.seqID = seq -} - -func (t *HeaderTransport) SeqID() uint32 { - return t.seqID -} - -func (t *HeaderTransport) Identity() string { - return t.identity -} - -func (t *HeaderTransport) SetIdentity(identity string) { - t.identity = identity -} - -func (t *HeaderTransport) PeerIdentity() string { - v, ok := t.ReadHeader(IdentityHeader) - vers, versok := t.ReadHeader(IDVersionHeader) - if ok && versok && vers == IDVersion { - return v - } - return "" -} - -func (t *HeaderTransport) SetPersistentHeader(key, value string) { - t.persistentWriteInfoHeaders[key] = value -} - -func (t *HeaderTransport) PersistentHeader(key string) (string, bool) { - v, ok := t.persistentWriteInfoHeaders[key] - return v, ok -} - -func (t *HeaderTransport) PersistentHeaders() map[string]string { - res := map[string]string{} - for k, v := range t.persistentWriteInfoHeaders { - res[k] = v - } - return res -} - -func (t *HeaderTransport) ClearPersistentHeaders() { - if len(t.persistentWriteInfoHeaders) != 0 { - t.persistentWriteInfoHeaders = map[string]string{} - } -} - -func (t *HeaderTransport) SetHeader(key, value string) { - t.writeInfoHeaders[key] = value -} - -func (t *HeaderTransport) Header(key string) (string, bool) { - v, ok := t.writeInfoHeaders[key] - return v, ok -} - -func (t *HeaderTransport) Headers() map[string]string { - res := map[string]string{} - for k, v := range t.writeInfoHeaders { - res[k] = v - } - return res -} - -func (t *HeaderTransport) ClearHeaders() { - if len(t.writeInfoHeaders) != 0 { - t.writeInfoHeaders = map[string]string{} - } -} - -func (t *HeaderTransport) ReadHeader(key string) (string, bool) { - if t.readHeader == nil { - return "", false - } - // per the C++ implementation, prefer persistent headers - if v, ok := t.readHeader.pHeaders[key]; ok { - return v, ok - } - v, ok := t.readHeader.headers[key] - return v, ok -} - -func (t *HeaderTransport) ReadHeaders() map[string]string { - res := map[string]string{} - if t.readHeader == nil { - return res - } - for k, v := range t.readHeader.headers { - res[k] = v - } - for k, v := range t.readHeader.pHeaders { - res[k] = v - } - return res -} - -func (t *HeaderTransport) ProtocolID() ProtocolID { - return t.protoID -} - -func (t *HeaderTransport) SetProtocolID(protoID ProtocolID) error { - if !(protoID == ProtocolIDBinary || protoID == ProtocolIDCompact) { - return NewTransportException( - NOT_IMPLEMENTED, fmt.Sprintf("unimplemented proto ID: %s (%#x)", protoID.String(), int64(protoID)), - ) - } - t.protoID = protoID - return nil -} - -func (t *HeaderTransport) AddTransform(trans TransformID) error { - if sup, ok := supportedTransforms[trans]; !ok || !sup { - return NewTransportException( - NOT_IMPLEMENTED, fmt.Sprintf("unimplemented transform ID: %s (%#x)", trans.String(), int64(trans)), - ) - } - for _, t := range t.writeTransforms { - if t == trans { - return nil - } - } - t.writeTransforms = append(t.writeTransforms, trans) - return nil -} - -// applyUntransform Fully read the frame and untransform into a local buffer 
-// we need to know the full size of the untransformed data -func (t *HeaderTransport) applyUntransform() error { - out, err := ioutil.ReadAll(t.framebuf) - if err != nil { - return err - } - t.frameSize = uint64(len(out)) - t.framebuf = newLimitedByteReader(bytes.NewBuffer(out), int64(len(out))) - return nil -} - -// ResetProtocol Needs to be called between every frame receive (BeginMessageRead) -// We do this to read out the header for each frame. This contains the length of the -// frame and protocol / metadata info. -func (t *HeaderTransport) ResetProtocol() error { - t.readHeader = nil - // TODO(carlverge): We should probably just read in the whole - // frame here. A bit of extra memory, probably a lot less CPU. - // Needs benchmark to test. - - hdr := &tHeader{} - // Consume the header from the input stream - err := hdr.Read(t.rbuf) - if err != nil { - return NewTransportExceptionFromError(err) - } - - // Set new header - t.readHeader = hdr - // Adopt the client's protocol - t.protoID = hdr.protoID - t.clientType = hdr.clientType - t.seqID = hdr.seq - t.flags = hdr.flags - - // Make sure we can't read past the current frame length - t.frameSize = hdr.payloadLen - t.framebuf = newLimitedByteReader(t.rbuf, int64(hdr.payloadLen)) - - for _, trans := range hdr.transforms { - xformer, terr := trans.Untransformer() - if terr != nil { - return NewTransportExceptionFromError(terr) - } - - t.framebuf, terr = xformer(t.framebuf) - if terr != nil { - return NewTransportExceptionFromError(terr) - } - } - - // Fully read the frame and apply untransforms if we have them - if len(hdr.transforms) > 0 { - err = t.applyUntransform() - if err != nil { - return NewTransportExceptionFromError(err) - } - } - - // respond in kind with the client's transforms - t.writeTransforms = hdr.transforms - - return nil -} - -// Open Open the internal transport -func (t *HeaderTransport) Open() error { - return t.transport.Open() -} - -// IsOpen Is the current transport open -func (t *HeaderTransport) IsOpen() bool { - return t.transport.IsOpen() -} - -// Close Close the internal transport -func (t *HeaderTransport) Close() error { - return t.transport.Close() -} - -// Read Read from the current framebuffer. EOF if the frame is done. -func (t *HeaderTransport) Read(buf []byte) (int, error) { - n, err := t.framebuf.Read(buf) - // Shouldn't be possibe, but just in case the frame size was flubbed - if uint64(n) > t.frameSize { - n = int(t.frameSize) - } - t.frameSize -= uint64(n) - return n, err -} - -// ReadByte Read a single byte from the current framebuffer. EOF if the frame is done. -func (t *HeaderTransport) ReadByte() (byte, error) { - b, err := t.framebuf.ReadByte() - t.frameSize-- - return b, err -} - -// Write Write multiple bytes to the framebuffer, does not send to transport. -func (t *HeaderTransport) Write(buf []byte) (int, error) { - n, err := t.wbuf.Write(buf) - return n, NewTransportExceptionFromError(err) -} - -// WriteByte Write a single byte to the framebuffer, does not send to transport. -func (t *HeaderTransport) WriteByte(c byte) error { - err := t.wbuf.WriteByte(c) - return NewTransportExceptionFromError(err) -} - -// WriteString Write a string to the framebuffer, does not send to transport. -func (t *HeaderTransport) WriteString(s string) (int, error) { - n, err := t.wbuf.WriteString(s) - return n, NewTransportExceptionFromError(err) -} - -// RemainingBytes Return how many bytes remain in the current recv framebuffer. 
-func (t *HeaderTransport) RemainingBytes() uint64 { - return t.frameSize -} - -func applyTransforms(buf *bytes.Buffer, transforms []TransformID) (*bytes.Buffer, error) { - if len(transforms) == 0 { - return buf, nil - } - - tmpbuf := bytes.NewBuffer(nil) - for _, trans := range transforms { - switch trans { - case TransformZlib: - zwr := zlib.NewWriter(tmpbuf) - _, err := buf.WriteTo(zwr) - if err != nil { - return nil, err - } - err = zwr.Close() - if err != nil { - return nil, err - } - buf, tmpbuf = tmpbuf, buf - tmpbuf.Reset() - case TransformZstd: - err := zstdWriter(tmpbuf, buf) - if err != nil { - return nil, err - } - buf, tmpbuf = tmpbuf, buf - tmpbuf.Reset() - default: - return nil, NewTransportException( - NOT_IMPLEMENTED, fmt.Sprintf("unimplemented transform ID: %s (%#x)", trans.String(), int64(trans)), - ) - } - } - return buf, nil -} - -func (t *HeaderTransport) flushHeader() error { - hdr := tHeader{} - hdr.headers = t.writeInfoHeaders - hdr.pHeaders = t.persistentWriteInfoHeaders - hdr.protoID = t.protoID - hdr.clientType = t.clientType - hdr.seq = t.seqID - hdr.flags = t.flags - hdr.transforms = t.writeTransforms - - if t.identity != "" { - hdr.headers[IdentityHeader] = t.identity - hdr.headers[IDVersionHeader] = IDVersion - } - - outbuf, err := applyTransforms(t.wbuf, t.writeTransforms) - if err != nil { - return NewTransportExceptionFromError(err) - } - t.wbuf = outbuf - - hdr.payloadLen = uint64(t.wbuf.Len()) - err = hdr.calcLenFromPayload() - if err != nil { - return NewTransportExceptionFromError(err) - } - - err = hdr.Write(t.transport) - return NewTransportExceptionFromError(err) -} - -func (t *HeaderTransport) flushFramed() error { - buflen := t.wbuf.Len() - framesize := uint32(buflen) - if buflen > int(MaxFrameSize) { - return NewTransportException( - INVALID_FRAME_SIZE, - fmt.Sprintf("cannot send bigframe of size %d", buflen), - ) - } - - err := binary.Write(t.transport, binary.BigEndian, framesize) - return NewTransportExceptionFromError(err) -} - -func (t *HeaderTransport) Flush() error { - var err error - - switch t.clientType { - case HeaderClientType: - err = t.flushHeader() - case FramedDeprecated: - err = t.flushFramed() - case FramedCompact: - err = t.flushFramed() - default: - t.wbuf.Reset() // reset incase wbuf pointer changes in xform - return NewTransportException( - UNKNOWN_TRANSPORT_EXCEPTION, - fmt.Sprintf("tHeader cannot flush for clientType %s", t.clientType.String()), - ) - } - - if err != nil { - t.wbuf.Reset() // reset incase wbuf pointer changes in xform - return err - } - - // Writeout the payload - if t.wbuf.Len() > 0 { - _, err = t.wbuf.WriteTo(t.transport) - if err != nil { - t.wbuf.Reset() // reset on return - return NewTransportExceptionFromError(err) - } - } - - // Remove the non-persistent headers on flush - t.ClearHeaders() - - err = t.transport.Flush() - - t.wbuf.Reset() // reset incase wbuf pointer changes in xform - return NewTransportExceptionFromError(err) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_client.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_client.go deleted file mode 100644 index ae4ee952..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_client.go +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// Default to using the shared http client. Library users are -// free to change this global client or specify one through -// HTTPClientOptions. -var DefaultHTTPClient *http.Client = http.DefaultClient - -type HTTPClient struct { - client *http.Client - response *http.Response - url *url.URL - requestBuffer *bytes.Buffer - header http.Header - nsecConnectTimeout int64 - nsecReadTimeout int64 -} - -type HTTPClientTransportFactory struct { - options HTTPClientOptions - url string - isPost bool -} - -func (p *HTTPClientTransportFactory) GetTransport(trans Transport) Transport { - if trans != nil { - t, ok := trans.(*HTTPClient) - if ok && t.url != nil { - if t.requestBuffer != nil { - t2, _ := NewHTTPPostClientWithOptions(t.url.String(), p.options) - return t2 - } - t2, _ := NewHTTPClientWithOptions(t.url.String(), p.options) - return t2 - } - } - if p.isPost { - s, _ := NewHTTPPostClientWithOptions(p.url, p.options) - return s - } - s, _ := NewHTTPClientWithOptions(p.url, p.options) - return s -} - -type HTTPClientOptions struct { - // If nil, DefaultHTTPClient is used - Client *http.Client -} - -func NewHTTPClientTransportFactory(url string) *HTTPClientTransportFactory { - return NewHTTPClientTransportFactoryWithOptions(url, HTTPClientOptions{}) -} - -func NewHTTPClientTransportFactoryWithOptions(url string, options HTTPClientOptions) *HTTPClientTransportFactory { - return &HTTPClientTransportFactory{url: url, isPost: false, options: options} -} - -func NewHTTPPostClientTransportFactory(url string) *HTTPClientTransportFactory { - return NewHTTPPostClientTransportFactoryWithOptions(url, HTTPClientOptions{}) -} - -func NewHTTPPostClientTransportFactoryWithOptions(url string, options HTTPClientOptions) *HTTPClientTransportFactory { - return &HTTPClientTransportFactory{url: url, isPost: true, options: options} -} - -func NewHTTPClientWithOptions(urlstr string, options HTTPClientOptions) (Transport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - response, err := http.Get(urlstr) - if err != nil { - return nil, err - } - client := options.Client - if client == nil { - client = DefaultHTTPClient - } - return &HTTPClient{client: client, response: response, url: parsedURL}, nil -} - -func NewHTTPClient(urlstr string) (Transport, error) { - return NewHTTPClientWithOptions(urlstr, HTTPClientOptions{}) -} - -func NewHTTPPostClientWithOptions(urlstr string, options HTTPClientOptions) (Transport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - client := options.Client - if client == nil { - client = DefaultHTTPClient - } - return &HTTPClient{client: client, url: parsedURL, requestBuffer: 
bytes.NewBuffer(buf), header: http.Header{}}, nil -} - -func NewHTTPPostClient(urlstr string) (Transport, error) { - return NewHTTPPostClientWithOptions(urlstr, HTTPClientOptions{}) -} - -// Set the HTTP Header for this specific Thrift Transport -// It is important that you first assert the Transport as a HTTPClient type -// like so: -// -// httpTrans := trans.(HTTPClient) -// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") -func (p *HTTPClient) SetHeader(key string, value string) { - p.header.Add(key, value) -} - -// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport -// It is important that you first assert the Transport as a HTTPClient type -// like so: -// -// httpTrans := trans.(HTTPClient) -// hdrValue := httpTrans.GetHeader("User-Agent") -func (p *HTTPClient) GetHeader(key string) string { - return p.header.Get(key) -} - -// Deletes the HTTP Header given a Header Key for this specific Thrift Transport -// It is important that you first assert the Transport as a HTTPClient type -// like so: -// -// httpTrans := trans.(HTTPClient) -// httpTrans.DelHeader("User-Agent") -func (p *HTTPClient) DelHeader(key string) { - p.header.Del(key) -} - -func (p *HTTPClient) Open() error { - // do nothing - return nil -} - -func (p *HTTPClient) IsOpen() bool { - return p.response != nil || p.requestBuffer != nil -} - -func (p *HTTPClient) closeResponse() error { - var err error - if p.response != nil && p.response.Body != nil { - // The docs specify that if keepalive is enabled and the response body is not - // read to completion the connection will never be returned to the pool and - // reused. Errors are being ignored here because if the connection is invalid - // and this fails for some reason, the Close() method will do any remaining - // cleanup. - io.Copy(ioutil.Discard, p.response.Body) - - err = p.response.Body.Close() - } - - p.response = nil - return err -} - -func (p *HTTPClient) Close() error { - if p.requestBuffer != nil { - p.requestBuffer.Reset() - p.requestBuffer = nil - } - return p.closeResponse() -} - -func (p *HTTPClient) Read(buf []byte) (int, error) { - if p.response == nil { - return 0, NewTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - n, err := p.response.Body.Read(buf) - if n > 0 && (err == nil || err == io.EOF) { - return n, nil - } - return n, NewTransportExceptionFromError(err) -} - -func (p *HTTPClient) ReadByte() (c byte, err error) { - return readByte(p.response.Body) -} - -func (p *HTTPClient) Write(buf []byte) (int, error) { - n, err := p.requestBuffer.Write(buf) - return n, err -} - -func (p *HTTPClient) WriteByte(c byte) error { - return p.requestBuffer.WriteByte(c) -} - -func (p *HTTPClient) WriteString(s string) (n int, err error) { - return p.requestBuffer.WriteString(s) -} - -func (p *HTTPClient) Flush() error { - // Close any previous response body to avoid leaking connections. - p.closeResponse() - - req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer) - if err != nil { - return NewTransportExceptionFromError(err) - } - p.header.Add("Content-Type", "application/x-thrift") - req.Header = p.header - response, err := p.client.Do(req) - if err != nil { - return NewTransportExceptionFromError(err) - } - if response.StatusCode != http.StatusOK { - // Close the response to avoid leaking file descriptors. closeResponse does - // more than just call Close(), so temporarily assign it and reuse the logic. 
- p.response = response - p.closeResponse() - - // TODO(pomack) log bad response - return NewTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) - } - p.response = response - return nil -} - -func (p *HTTPClient) RemainingBytes() (num_bytes uint64) { - len := p.response.ContentLength - if len >= 0 { - return uint64(len) - } - - return UnknownRemaining // the truth is, we just don't know unless framed is used -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_transport.go deleted file mode 100644 index 055b9e9a..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/http_transport.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "context" - "net" - "net/http" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor Processor, - inPfactory, outPfactory ProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - transport := NewStreamTransport(r.Body, w) - Process(processor, inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - } -} - -// NewThriftHandlerContextFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerContextFunc(processor ProcessorContext, - inPfactory, outPfactory ProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - transport := NewStreamTransport(r.Body, w) - ctx := newConnInfoFromHTTP(r) - ProcessContext(ctx, processor, inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - } -} - -func newConnInfoFromHTTP(r *http.Request) context.Context { - ctx := r.Context() - laddr, _ := ctx.Value(http.LocalAddrContextKey).(net.Addr) - raddr, _ := net.ResolveTCPAddr("tcp", r.RemoteAddr) - return context.WithValue(context.Background(), connInfoKey, ConnInfo{ - LocalAddr: laddr, - RemoteAddr: raddr, - tlsState: r.TLS, - }) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/interceptor.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/interceptor.go deleted file mode 100644 index 4dded6e8..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/interceptor.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package thrift - -import ( - "context" -) - -// Interceptor is a function that runs before the actual method. It is passed -// the connection context, the method name and the args for that method. -// The interceptor is responsible for calling pfunc.RunContext() and it can -// return a result or an exception which are then sent back to the caller. -// The interceptor is expected to be concurrency safe. -type Interceptor func(ctx context.Context, methodName string, pfunc ProcessorFunctionContext, args Struct) (WritableStruct, ApplicationException) - -type interceptorProcessor struct { - interceptor Interceptor - Processor -} - -// WrapInterceptor wraps an interceptor around the Processor p -// such as when running the method returned by that processor it will execute -// the interceptor instead. The interceptor is executed with -// context.Background() as its context. -func WrapInterceptor(interceptor Interceptor, p Processor) Processor { - if interceptor == nil { - return p - } - return &interceptorProcessor{ - interceptor: interceptor, - Processor: p, - } -} - -func (p *interceptorProcessor) GetProcessorFunction(name string) (ProcessorFunction, error) { - pf, err := p.Processor.GetProcessorFunction(name) - if err != nil { - return nil, err - } - return &interceptorProcessorFunction{ - interceptor: p.interceptor, - methodName: name, - ProcessorFunction: pf, - }, nil -} - -type interceptorProcessorFunction struct { - interceptor Interceptor - methodName string - ProcessorFunction -} - -func (pf *interceptorProcessorFunction) Run(args Struct) (WritableStruct, ApplicationException) { - ctxPf := NewProcessorFunctionContextAdapter(pf.ProcessorFunction) - return pf.interceptor(context.Background(), pf.methodName, ctxPf, args) -} - -type interceptorProcessorContext struct { - interceptor Interceptor - ProcessorContext -} - -// WrapInterceptorContext wraps an interceptor around the ProcessorContext p -// such as when running the method returned by that processor it will execute -// the interceptor instead. 
-func WrapInterceptorContext(interceptor Interceptor, p ProcessorContext) ProcessorContext { - if interceptor == nil { - return p - } - return &interceptorProcessorContext{ - interceptor: interceptor, - ProcessorContext: p, - } -} - -func (p *interceptorProcessorContext) GetProcessorFunctionContext(name string) (ProcessorFunctionContext, error) { - pf, err := p.ProcessorContext.GetProcessorFunctionContext(name) - if err != nil { - return nil, err - } - return &interceptorProcessorFunctionContext{ - interceptor: p.interceptor, - methodName: name, - ProcessorFunctionContext: pf, - }, nil -} - -type interceptorProcessorFunctionContext struct { - interceptor Interceptor - methodName string - ProcessorFunctionContext -} - -func (pf *interceptorProcessorFunctionContext) RunContext(ctx context.Context, args Struct) (WritableStruct, ApplicationException) { - return pf.interceptor(ctx, pf.methodName, pf.ProcessorFunctionContext, args) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/iostream_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/iostream_transport.go deleted file mode 100644 index 4bbd13be..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/iostream_transport.go +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -import ( - "bufio" - "io" -) - -// StreamTransport is a Transport made of an io.Reader and/or an io.Writer -type StreamTransport struct { - io.Reader - io.Writer - isReadWriter bool - closed bool -} - -type StreamTransportFactory struct { - Reader io.Reader - Writer io.Writer - isReadWriter bool -} - -func (p *StreamTransportFactory) GetTransport(trans Transport) Transport { - if trans != nil { - t, ok := trans.(*StreamTransport) - if ok { - if t.isReadWriter { - return NewStreamTransportRW(t.Reader.(io.ReadWriter)) - } - if t.Reader != nil && t.Writer != nil { - return NewStreamTransport(t.Reader, t.Writer) - } - if t.Reader != nil && t.Writer == nil { - return NewStreamTransportR(t.Reader) - } - if t.Reader == nil && t.Writer != nil { - return NewStreamTransportW(t.Writer) - } - return &StreamTransport{} - } - } - if p.isReadWriter { - return NewStreamTransportRW(p.Reader.(io.ReadWriter)) - } - if p.Reader != nil && p.Writer != nil { - return NewStreamTransport(p.Reader, p.Writer) - } - if p.Reader != nil && p.Writer == nil { - return NewStreamTransportR(p.Reader) - } - if p.Reader == nil && p.Writer != nil { - return NewStreamTransportW(p.Writer) - } - return &StreamTransport{} -} - -func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { - return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} -} - -func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportR(r io.Reader) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r)} -} - -func NewStreamTransportW(w io.Writer) *StreamTransport { - return &StreamTransport{Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { - bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) - return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} -} - -func (p *StreamTransport) IsOpen() bool { - return !p.closed -} - -// implicitly opened on creation, can't be reopened once closed -func (p *StreamTransport) Open() error { - if !p.closed { - return NewTransportException(ALREADY_OPEN, "StreamTransport already open.") - } else { - return NewTransportException(NOT_OPEN, "cannot reopen StreamTransport.") - } -} - -// Closes both the input and output streams. -func (p *StreamTransport) Close() error { - if p.closed { - return NewTransportException(NOT_OPEN, "StreamTransport already closed.") - } - p.closed = true - closedReader := false - if p.Reader != nil { - c, ok := p.Reader.(io.Closer) - if ok { - e := c.Close() - closedReader = true - if e != nil { - return e - } - } - p.Reader = nil - } - if p.Writer != nil && (!closedReader || !p.isReadWriter) { - c, ok := p.Writer.(io.Closer) - if ok { - e := c.Close() - if e != nil { - return e - } - } - p.Writer = nil - } - return nil -} - -// Flushes the underlying output stream if not null. 
-func (p *StreamTransport) Flush() error { - if p.Writer == nil { - return NewTransportException(NOT_OPEN, "Cannot flush null outputStream") - } - f, ok := p.Writer.(Flusher) - if ok { - err := f.Flush() - if err != nil { - return NewTransportExceptionFromError(err) - } - } - return nil -} - -func (p *StreamTransport) Read(c []byte) (n int, err error) { - n, err = p.Reader.Read(c) - if err != nil { - err = NewTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) ReadByte() (c byte, err error) { - f, ok := p.Reader.(io.ByteReader) - if ok { - c, err = f.ReadByte() - } else { - c, err = readByte(p.Reader) - } - if err != nil { - err = NewTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) Write(c []byte) (n int, err error) { - n, err = p.Writer.Write(c) - if err != nil { - err = NewTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteByte(c byte) (err error) { - f, ok := p.Writer.(io.ByteWriter) - if ok { - err = f.WriteByte(c) - } else { - err = writeByte(p.Writer, c) - } - if err != nil { - err = NewTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteString(s string) (n int, err error) { - f, ok := p.Writer.(stringWriter) - if ok { - n, err = f.WriteString(s) - } else { - n, err = p.Writer.Write([]byte(s)) - } - if err != nil { - err = NewTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { - return UnknownRemaining // the truth is, we just don't know unless framed is used -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/json_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/json_protocol.go deleted file mode 100644 index 5b3c2ad5..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/json_protocol.go +++ /dev/null @@ -1,597 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "encoding/base64" - "fmt" -) - -const ( - THRIFT_JSON_PROTOCOL_VERSION = 1 -) - -// for references to _ParseContext see tsimplejson_protocol.go - -// JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured JSONProtocol. 
-// -type JSONProtocol struct { - *SimpleJSONProtocol -} - -// Constructor -func NewJSONProtocol(t Transport) *JSONProtocol { - v := &JSONProtocol{SimpleJSONProtocol: NewSimpleJSONProtocol(t)} - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) - return v -} - -// Factory -type JSONProtocolFactory struct{} - -func (p *JSONProtocolFactory) GetProtocol(trans Transport) Protocol { - return NewJSONProtocol(trans) -} - -func NewJSONProtocolFactory() *JSONProtocolFactory { - return &JSONProtocolFactory{} -} - -func (p *JSONProtocol) WriteMessageBegin(name string, typeId MessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { - return e - } - if e := p.WriteString(name); e != nil { - return e - } - if e := p.WriteByte(byte(typeId)); e != nil { - return e - } - if e := p.WriteI32(seqId); e != nil { - return e - } - return nil -} - -func (p *JSONProtocol) WriteMessageEnd() error { - return p.OutputListEnd() -} - -func (p *JSONProtocol) WriteStructBegin(name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *JSONProtocol) WriteStructEnd() error { - return p.OutputObjectEnd() -} - -func (p *JSONProtocol) WriteFieldBegin(name string, typeId Type, id int16) error { - if e := p.WriteI16(id); e != nil { - return e - } - if e := p.OutputObjectBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(typeId) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - return nil -} - -func (p *JSONProtocol) WriteFieldEnd() error { - return p.OutputObjectEnd() -} - -func (p *JSONProtocol) WriteFieldStop() error { return nil } - -func (p *JSONProtocol) WriteMapBegin(keyType Type, valueType Type, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(keyType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - s, e1 = p.TypeIdToString(valueType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return p.OutputObjectBegin() -} - -func (p *JSONProtocol) WriteMapEnd() error { - if e := p.OutputObjectEnd(); e != nil { - return e - } - return p.OutputListEnd() -} - -func (p *JSONProtocol) WriteListBegin(elemType Type, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *JSONProtocol) WriteListEnd() error { - return p.OutputListEnd() -} - -func (p *JSONProtocol) WriteSetBegin(elemType Type, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *JSONProtocol) WriteSetEnd() error { - return p.OutputListEnd() -} - -func (p *JSONProtocol) WriteBool(b bool) error { - if b { - return p.WriteI32(1) - } - return p.WriteI32(0) -} - -func (p *JSONProtocol) WriteByte(b byte) error { - return p.WriteI32(int32(b)) -} - -func (p *JSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) -} - -func (p *JSONProtocol) WriteI32(v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *JSONProtocol) WriteI64(v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *JSONProtocol) WriteDouble(v float64) error { - return p.OutputF64(v) -} - -func (p *JSONProtocol) WriteFloat(v float32) error { - return p.OutputF32(v) -} - -func (p *JSONProtocol) WriteString(v string) error 
{ - return p.OutputString(v) -} - -func (p *JSONProtocol) WriteBinary(v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *JSONProtocol) ReadMessageBegin() (name string, typeId MessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - version, err := p.ReadI32() - if err != nil { - return name, typeId, seqId, err - } - if version != THRIFT_JSON_PROTOCOL_VERSION { - e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) - return name, typeId, seqId, NewProtocolExceptionWithType(INVALID_DATA, e) - - } - if name, err = p.ReadString(); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte() - typeId = MessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *JSONProtocol) ReadMessageEnd() error { - err := p.ParseListEnd() - return err -} - -func (p *JSONProtocol) ReadStructBegin() (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *JSONProtocol) ReadStructEnd() error { - return p.ParseObjectEnd() -} - -func (p *JSONProtocol) ReadFieldBegin() (string, Type, int16, error) { - b, _ := p.reader.Peek(1) - if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { - return "", STOP, -1, nil - } - fieldId, err := p.ReadI16() - if err != nil { - return "", STOP, fieldId, err - } - if _, err = p.ParseObjectStart(); err != nil { - return "", STOP, fieldId, err - } - sType, err := p.ReadString() - if err != nil { - return "", STOP, fieldId, err - } - fType, err := p.StringToTypeId(sType) - return "", fType, fieldId, err -} - -func (p *JSONProtocol) ReadFieldEnd() error { - return p.ParseObjectEnd() -} - -func (p *JSONProtocol) ReadMapBegin() (keyType Type, valueType Type, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - sKeyType, e := p.ReadString() - if e != nil { - return keyType, valueType, size, e - } - keyType, e = p.StringToTypeId(sKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - sValueType, e := p.ReadString() - if e != nil { - return keyType, valueType, size, e - } - valueType, e = p.StringToTypeId(sValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, e := p.ReadI64() - if e != nil { - return keyType, valueType, size, e - } - size = int(iSize) - - _, e = p.ParseObjectStart() - return keyType, valueType, size, e -} - -func (p *JSONProtocol) ReadMapEnd() error { - e := p.ParseObjectEnd() - if e != 
nil { - return e - } - return p.ParseListEnd() -} - -func (p *JSONProtocol) ReadListBegin() (elemType Type, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *JSONProtocol) ReadListEnd() error { - return p.ParseListEnd() -} - -func (p *JSONProtocol) ReadSetBegin() (elemType Type, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *JSONProtocol) ReadSetEnd() error { - return p.ParseListEnd() -} - -func (p *JSONProtocol) ReadBool() (bool, error) { - value, err := p.ReadI32() - return (value != 0), err -} - -func (p *JSONProtocol) ReadByte() (byte, error) { - v, err := p.ReadI64() - return byte(v), err -} - -func (p *JSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() - return int16(v), err -} - -func (p *JSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() - return int32(v), err -} - -func (p *JSONProtocol) ReadI64() (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *JSONProtocol) ReadDouble() (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *JSONProtocol) ReadFloat() (float32, error) { - v, _, err := p.ParseF32() - return v, err -} - -func (p *JSONProtocol) ReadString() (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *JSONProtocol) ReadBinary() ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *JSONProtocol) Flush() (err error) { - err = p.writer.Flush() - if err == nil { - err = p.trans.Flush() - } - return NewProtocolException(err) -} - -func (p *JSONProtocol) Skip(fieldType Type) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *JSONProtocol) Transport() Transport { - return p.trans -} - -func (p *JSONProtocol) OutputElemListBegin(elemType Type, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != 
nil { - return e - } - return nil -} - -func (p *JSONProtocol) ParseElemListBegin() (elemType Type, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - sElemType, err := p.ReadString() - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *JSONProtocol) readElemListBegin() (elemType Type, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - sElemType, err := p.ReadString() - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *JSONProtocol) writeElemListBegin(elemType Type, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *JSONProtocol) TypeIdToString(fieldType Type) (string, error) { - switch byte(fieldType) { - case BOOL: - return "tf", nil - case BYTE: - return "i8", nil - case I16: - return "i16", nil - case I32: - return "i32", nil - case I64: - return "i64", nil - case DOUBLE: - return "dbl", nil - case FLOAT: - return "flt", nil - case STRING: - return "str", nil - case STRUCT: - return "rec", nil - case MAP: - return "map", nil - case SET: - return "set", nil - case LIST: - return "lst", nil - } - - e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) - return "", NewProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *JSONProtocol) StringToTypeId(fieldType string) (Type, error) { - switch fieldType { - case "tf": - return Type(BOOL), nil - case "i8": - return Type(BYTE), nil - case "i16": - return Type(I16), nil - case "i32": - return Type(I32), nil - case "i64": - return Type(I64), nil - case "dbl": - return Type(DOUBLE), nil - case "flt": - return Type(FLOAT), nil - case "str": - return Type(STRING), nil - case "rec": - return Type(STRUCT), nil - case "map": - return Type(MAP), nil - case "set": - return Type(SET), nil - case "lst": - return Type(LIST), nil - } - - e := fmt.Errorf("Unknown type identifier: %s", fieldType) - return Type(STOP), NewProtocolExceptionWithType(INVALID_DATA, e) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/memory_buffer.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/memory_buffer.go deleted file mode 100644 index fc431a71..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/memory_buffer.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "bytes" -) - -// Memory buffer-based implementation of the Transport interface. -type MemoryBuffer struct { - *bytes.Buffer - size int -} - -type MemoryBufferTransportFactory struct { - size int -} - -func (p *MemoryBufferTransportFactory) GetTransport(trans Transport) Transport { - if trans != nil { - t, ok := trans.(*MemoryBuffer) - if ok && t.size > 0 { - return NewMemoryBufferLen(t.size) - } - } - return NewMemoryBufferLen(p.size) -} - -func NewMemoryBufferTransportFactory(size int) *MemoryBufferTransportFactory { - return &MemoryBufferTransportFactory{size: size} -} - -func NewMemoryBuffer() *MemoryBuffer { - return &MemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} -} - -func NewMemoryBufferLen(size int) *MemoryBuffer { - buf := make([]byte, 0, size) - return &MemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} -} - -func (p *MemoryBuffer) IsOpen() bool { - return true -} - -func (p *MemoryBuffer) Open() error { - return nil -} - -func (p *MemoryBuffer) Close() error { - p.Buffer.Reset() - return nil -} - -// Flushing a memory buffer is a no-op -func (p *MemoryBuffer) Flush() error { - return nil -} - -func (p *MemoryBuffer) RemainingBytes() (num_bytes uint64) { - return uint64(p.Buffer.Len()) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/messagetype.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/messagetype.go deleted file mode 100644 index 73616391..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/messagetype.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -// Message type constants in the Thrift protocol. -type MessageType int32 - -const ( - INVALID_MESSAGE_TYPE MessageType = 0 - CALL MessageType = 1 - REPLY MessageType = 2 - EXCEPTION MessageType = 3 - ONEWAY MessageType = 4 -) diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/multiplexed_protocol.go deleted file mode 100644 index 610737fa..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/multiplexed_protocol.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. 
- * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "fmt" - "strings" -) - -/* -MultiplexedProtocol is a protocol-independent concrete decorator -that allows a Thrift client to communicate with a multiplexing Thrift server, -by prepending the service name to the function name during function calls. - -NOTE: THIS IS NOT USED BY SERVERS. On the server, use MultiplexedProcessor to handle request -from a multiplexing client. - -This example uses a single socket transport to invoke two services: - -socket := thrift.NewSocket(thrift.SocketArrd(addr), thrif.SocketTimeout(TIMEOUT)) -transport := thrift.NewFramedTransport(socket) -protocol := thrift.NewBinaryProtocolTransport(transport) - -mp := thrift.NewMultiplexedProtocol(protocol, "Calculator") -service := Calculator.NewCalculatorClient(mp) - -mp2 := thrift.NewMultiplexedProtocol(protocol, "WeatherReport") -service2 := WeatherReport.NewWeatherReportClient(mp2) - -err := transport.Open() -if err != nil { - t.Fatal("Unable to open client socket", err) -} - -fmt.Println(service.Add(2,2)) -fmt.Println(service2.GetTemperature()) -*/ - -type MultiplexedProtocol struct { - Protocol - serviceName string -} - -const MULTIPLEXED_SEPARATOR = ":" - -func NewMultiplexedProtocol(protocol Protocol, serviceName string) *MultiplexedProtocol { - return &MultiplexedProtocol{ - Protocol: protocol, - serviceName: serviceName, - } -} - -func (t *MultiplexedProtocol) WriteMessageBegin(name string, typeId MessageType, seqid int32) error { - if typeId == CALL || typeId == ONEWAY { - return t.Protocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) - } else { - return t.Protocol.WriteMessageBegin(name, typeId, seqid) - } -} - -/* -MultiplexedProcessor is a Processor allowing -a single Server to provide multiple services. 
- -To do so, you instantiate the processor and then register additional -processors with it, as shown in the following example: - -var processor = thrift.NewMultiplexedProcessor() - -firsprocessor := -processor.RegisterProcessor("FirstService", firsprocessor) - -processor.registerProcessor( - "Calculator", - Calculator.NewCalculatorProcessor(&CalculatorHandler{}), -) - -processor.registerProcessor( - "WeatherReport", - WeatherReport.NewWeatherReporprocessor(&WeatherReportHandler{}), -) - -serverTransport, err := thrift.NewServerSocketTimeout(addr, TIMEOUT) -if err != nil { - t.Fatal("Unable to create server socket", err) -} -server := thrift.NewSimpleServer(processor, serverTransport) -server.Serve(); -*/ - -type MultiplexedProcessor struct { - serviceProcessorMap map[string]Processor - Defaulprocessor Processor -} - -func NewMultiplexedProcessor() *MultiplexedProcessor { - return &MultiplexedProcessor{ - serviceProcessorMap: make(map[string]Processor), - } -} - -func (t *MultiplexedProcessor) RegisterDefault(processor Processor) { - t.Defaulprocessor = processor -} - -func (t *MultiplexedProcessor) RegisterProcessor(name string, processor Processor) { - if t.serviceProcessorMap == nil { - t.serviceProcessorMap = make(map[string]Processor) - } - t.serviceProcessorMap[name] = processor -} - -// GetProcessorFunction implements the thrift.Processor interface. It parses the -// thrift function name to figure out which processor to route the request to and -// returns descriptive error messages to help clients diagnose errors. -func (t *MultiplexedProcessor) GetProcessorFunction(name string) (ProcessorFunction, error) { - //extract the service name - v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) - if len(v) != 2 { - if t.Defaulprocessor != nil { - return t.Defaulprocessor.GetProcessorFunction(name) - } - return nil, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a MultiplexProtocol in your client?", name) - } - actualProcessor, ok := t.serviceProcessorMap[v[0]] - if !ok { - return nil, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) - } - return actualProcessor.GetProcessorFunction(v[1]) -} - -//Protocol that use stored message for ReadMessageBegin -type storedMessageProtocol struct { - Protocol - name string - typeId MessageType - seqid int32 -} - -func NewStoredMessageProtocol(protocol Protocol, name string, typeId MessageType, seqid int32) *storedMessageProtocol { - return &storedMessageProtocol{protocol, name, typeId, seqid} -} - -func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId MessageType, seqid int32, err error) { - return s.name, s.typeId, s.seqid, nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/numeric.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/numeric.go deleted file mode 100644 index ec6b8bba..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/numeric.go +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY Numeric = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN Numeric = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - ZERO Numeric = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL Numeric = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromFloat(dValue float32) Numeric { - if math.IsInf(float64(dValue), 1) { - return INFINITY - } - if math.IsInf(float64(dValue), -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(float64(dValue)) { - return NAN - } - iValue := int32(dValue) - sValue := strconv.FormatFloat(float64(dValue), 'g', 10, 32) - isNil := false - return &numeric{iValue: int64(iValue), dValue: float64(dValue), sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := string(iValue) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := string(iValue) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - 
return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/pointerize.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/pointerize.go deleted file mode 100644 index b7edfb49..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/pointerize.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -/////////////////////////////////////////////////////////////////////////////// -// This file is home to helpers that convert from various base types to -// respective pointer types. This is necessary because Go does not permit -// references to constants, nor can a pointer type to base type be allocated -// and initialized in a single expression. -// -// E.g., this is not allowed: -// -// var ip *int = &5 -// -// But this *is* allowed: -// -// func IntPtr(i int) *int { return &i } -// var ip *int = IntPtr(5) -// -// Since pointers to base types are commonplace as [optional] fields in -// exported thrift structs, we factor such helpers here. -/////////////////////////////////////////////////////////////////////////////// - -func Float32Ptr(v float32) *float32 { return &v } -func Float64Ptr(v float64) *float64 { return &v } -func IntPtr(v int) *int { return &v } -func Int16Ptr(v int16) *int16 { return &v } -func Int32Ptr(v int32) *int32 { return &v } -func Int64Ptr(v int64) *int64 { return &v } -func StringPtr(v string) *string { return &v } -func Uint32Ptr(v uint32) *uint32 { return &v } -func Uint64Ptr(v uint64) *uint64 { return &v } -func BoolPtr(v bool) *bool { return &v } -func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor.go deleted file mode 100644 index aaf0178e..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "context" - "fmt" - "strings" -) - -// Processor exposes access to processor functions which -// manage I/O and processing of a input message for a specific -// server function -type Processor interface { - // GetProcessorFunction is given the name of a thrift function and - // the type of the inbound thrift message. It is expected to return - // a non-nil ProcessorFunction when the function can be successfully - // found. - // - // If an error is returned, it will be wrapped in an application level - // thrift exception and returned. - // - // If ProcessorFunction and error are both nil, a generic error will be - // sent which explains that no processor function exists with the specified - // name on this server. - GetProcessorFunction(name string) (ProcessorFunction, error) -} - -// ProcessorFunction is the interface that must be implemented in -// order to perform io and message processing -type ProcessorFunction interface { - // Read a serializable message from the input protocol. - Read(iprot Protocol) (Struct, Exception) - // Process a message handing it to the client handler. - Run(args Struct) (WritableStruct, ApplicationException) - // Write a serializable responsne - Write(seqID int32, result WritableStruct, oprot Protocol) Exception -} - -// Process is a utility function to take a processor and an input and output -// protocol, and fully process a message. It understands the thrift protocol. -// A framework could be written outside of the thrift library but would need to -// duplicate this logic. -func Process(processor Processor, iprot, oprot Protocol) (keepOpen bool, exc Exception) { - return ProcessContext(context.Background(), NewProcessorContextAdapter(processor), iprot, oprot) -} - -// ProcessorContext is a Processor that supports contexts. -type ProcessorContext interface { - GetProcessorFunctionContext(name string) (ProcessorFunctionContext, error) -} - -// NewProcessorContextAdapter creates a ProcessorContext from a regular Processor. -func NewProcessorContextAdapter(p Processor) ProcessorContext { - return &ctxProcessorAdapter{p} -} - -type ctxProcessorAdapter struct { - Processor -} - -func (p ctxProcessorAdapter) GetProcessorFunctionContext(name string) (ProcessorFunctionContext, error) { - f, err := p.Processor.GetProcessorFunction(name) - if err != nil { - return nil, err - } - return NewProcessorFunctionContextAdapter(f), nil -} - -// ProcessorFunctionContext is a ProcessorFunction that supports contexts. -type ProcessorFunctionContext interface { - Read(iprot Protocol) (Struct, Exception) - RunContext(ctx context.Context, args Struct) (WritableStruct, ApplicationException) - Write(seqID int32, result WritableStruct, oprot Protocol) Exception -} - -// NewProcessorFunctionContextAdapter creates a ProcessorFunctionContext from a regular ProcessorFunction. 
-func NewProcessorFunctionContextAdapter(p ProcessorFunction) ProcessorFunctionContext { - return &ctxProcessorFunctionAdapter{p} -} - -type ctxProcessorFunctionAdapter struct { - ProcessorFunction -} - -func (p ctxProcessorFunctionAdapter) RunContext(ctx context.Context, args Struct) (WritableStruct, ApplicationException) { - return p.ProcessorFunction.Run(args) -} - -// ProcessContext is a Process that supports contexts. -func ProcessContext(ctx context.Context, processor ProcessorContext, iprot, oprot Protocol) (keepOpen bool, ext Exception) { - name, messageType, seqID, rerr := iprot.ReadMessageBegin() - if rerr != nil { - if err, ok := rerr.(TransportException); ok && err.TypeID() == END_OF_FILE { - // connection terminated because client closed connection - return false, nil - } - return false, rerr - } - var err ApplicationException - var pfunc ProcessorFunctionContext - if messageType != CALL && messageType != ONEWAY { - // case one: invalid message type - err = NewApplicationException(UNKNOWN_METHOD, fmt.Sprintf("unexpected message type: %d", messageType)) - // error should be sent, connection should stay open if successful - } - if err == nil { - pf, e2 := processor.GetProcessorFunctionContext(name) - if pf == nil { - if e2 == nil { - err = NewApplicationException(UNKNOWN_METHOD, fmt.Sprintf("no such function: %q", name)) - } else { - err = NewApplicationException(UNKNOWN_METHOD, e2.Error()) - } - } else { - pfunc = pf - } - } - - // if there was an error before we could find the Processor function, attempt to skip the protocol - // message and return an error - if err != nil { - if e2 := iprot.Skip(STRUCT); e2 != nil { - return false, e2 - } else if e2 := iprot.ReadMessageEnd(); e2 != nil { - return false, e2 - } - // for ONEWAY, we have no way to report that the processing failed. - if messageType != ONEWAY { - if e2 := sendException(oprot, name, seqID, err); e2 != nil { - return false, e2 - } - } - return true, err - } - - if pfunc == nil { - panic("logic error in thrift.Process() handler. 
processor function may not be nil") - } - - argStruct, e2 := pfunc.Read(iprot) - if e2 != nil { - // close connection on read failure - return false, e2 - } - var result WritableStruct - result, err = pfunc.RunContext(ctx, argStruct) - - // for ONEWAY messages, never send a response - if messageType == CALL { - // protect message writing - if err != nil { - switch oprotHeader := oprot.(type) { - case *HeaderProtocol: - // get type name without package or pointer information - fqet := strings.Replace(fmt.Sprintf("%T", err), "*", "", -1) - et := strings.Split(fqet, ".") - errorType := et[len(et)-1] - - // set header for ServiceRouter - oprotHeader.SetHeader("uex", errorType) - oprotHeader.SetHeader("uexw", err.Error()) - } - // it's an application generated error, so serialize it - // to the client - result = err - } - - if e2 := pfunc.Write(seqID, result, oprot); e2 != nil { - // close connection on write failure - return false, err - } - } - - // keep the connection open and ignore errors - // if type was CALL, error has already been serialized to client - // if type was ONEWAY, no exception is to be thrown - return true, nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor_factory.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor_factory.go deleted file mode 100644 index bafbeda3..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/processor_factory.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -// ProcessorFactory is the default processor factory which returns -// a singleton instance. -type ProcessorFactory interface { - GetProcessor(trans Transport) Processor -} - -type processorFactory struct { - processor Processor -} - -// NewProcessorFactory returns a ProcessorFactory. -func NewProcessorFactory(p Processor) ProcessorFactory { - return &processorFactory{processor: p} -} - -func (p *processorFactory) GetProcessor(trans Transport) Processor { - return p.processor -} - -/** - * The default processor factory just returns a singleton - * instance. - */ -type ProcessorFunctionFactory interface { - GetProcessorFunction(trans Transport) ProcessorFunction -} - -type processorFunctionFactory struct { - processor ProcessorFunction -} - -func NewProcessorFunctionFactory(p ProcessorFunction) ProcessorFunctionFactory { - return &processorFunctionFactory{processor: p} -} - -func (p *processorFunctionFactory) GetProcessorFunction(trans Transport) ProcessorFunction { - return p.processor -} - -// ProcessorFactoryContext is a ProcessorFactory that supports contexts. 
-type ProcessorFactoryContext interface { - GetProcessorContext(trans Transport) ProcessorContext -} - -type processorFactoryContext struct { - processorContext ProcessorContext -} - -// NewProcessorFactoryContext returns a ProcessorFactoryContext. -func NewProcessorFactoryContext(p ProcessorContext) ProcessorFactoryContext { - return &processorFactoryContext{processorContext: p} -} - -func (p *processorFactoryContext) GetProcessorContext(trans Transport) ProcessorContext { - return p.processorContext -} - -// NewProcessorFactoryContextAdapter creates a ProcessorFactoryContext from a regular ProcessorFactory. -func NewProcessorFactoryContextAdapter(p ProcessorFactory) ProcessorFactoryContext { - return &ctxProcessorFactoryAdapter{p} -} - -type ctxProcessorFactoryAdapter struct { - ProcessorFactory -} - -func (p ctxProcessorFactoryAdapter) GetProcessorContext(trans Transport) ProcessorContext { - return NewProcessorContextAdapter(p.ProcessorFactory.GetProcessor(trans)) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol.go deleted file mode 100644 index 3a1e2547..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -import ( - "errors" - "fmt" -) - -type ProtocolID int16 - -const ( - ProtocolIDBinary ProtocolID = 0 - ProtocolIDJSON ProtocolID = 1 - ProtocolIDCompact ProtocolID = 2 - ProtocolIDDebug ProtocolID = 3 - ProtocolIDVirtual ProtocolID = 4 - ProtocolIDSimpleJSON ProtocolID = 5 -) - -func (p ProtocolID) String() string { - switch p { - case ProtocolIDBinary: - return "binary" - case ProtocolIDJSON: - return "json" - case ProtocolIDCompact: - return "compact" - case ProtocolIDDebug: - return "debug" - case ProtocolIDVirtual: - return "virtual" - case ProtocolIDSimpleJSON: - return "simplejson" - default: - return "unknown" - } -} - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type Protocol interface { - WriteMessageBegin(name string, typeId MessageType, seqid int32) error - WriteMessageEnd() error - WriteStructBegin(name string) error - WriteStructEnd() error - WriteFieldBegin(name string, typeId Type, id int16) error - WriteFieldEnd() error - WriteFieldStop() error - WriteMapBegin(keyType Type, valueType Type, size int) error - WriteMapEnd() error - WriteListBegin(elemType Type, size int) error - WriteListEnd() error - WriteSetBegin(elemType Type, size int) error - WriteSetEnd() error - WriteBool(value bool) error - WriteByte(value byte) error - WriteI16(value int16) error - WriteI32(value int32) error - WriteI64(value int64) error - WriteDouble(value float64) error - WriteFloat(value float32) error - WriteString(value string) error - WriteBinary(value []byte) error - - ReadMessageBegin() (name string, typeId MessageType, seqid int32, err error) - ReadMessageEnd() error - ReadStructBegin() (name string, err error) - ReadStructEnd() error - ReadFieldBegin() (name string, typeId Type, id int16, err error) - ReadFieldEnd() error - ReadMapBegin() (keyType Type, valueType Type, size int, err error) - ReadMapEnd() error - ReadListBegin() (elemType Type, size int, err error) - ReadListEnd() error - ReadSetBegin() (elemType Type, size int, err error) - ReadSetEnd() error - ReadBool() (value bool, err error) - ReadByte() (value byte, err error) - ReadI16() (value int16, err error) - ReadI32() (value int32, err error) - ReadI64() (value int64, err error) - ReadDouble() (value float64, err error) - ReadFloat() (value float32, err error) - ReadString() (value string, err error) - ReadBinary() (value []byte, err error) - - Skip(fieldType Type) (err error) - Flush() (err error) - - Transport() Transport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input Protocol object. -func SkipDefaultDepth(prot Protocol, typeId Type) (err error) { - return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input Protocol object. 
-func Skip(self Protocol, fieldType Type, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case BOOL: - _, err = self.ReadBool() - return - case BYTE: - _, err = self.ReadByte() - return - case I16: - _, err = self.ReadI16() - return - case I32: - _, err = self.ReadI32() - return - case I64: - _, err = self.ReadI64() - return - case DOUBLE: - _, err = self.ReadDouble() - return - case FLOAT: - _, err = self.ReadFloat() - return - case STRING: - _, err = self.ReadString() - return - case STRUCT: - if _, err = self.ReadStructBegin(); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin() - if typeId == STOP { - break - } - err := Skip(self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd() - } - return self.ReadStructEnd() - case MAP: - keyType, valueType, size, err := self.ReadMapBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(valueType) - } - return self.ReadMapEnd() - case SET: - elemType, size, err := self.ReadSetBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd() - case LIST: - elemType, size, err := self.ReadListBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd() - default: - return fmt.Errorf("unable to skip over unknown type id %d", fieldType) - } -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_exception.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_exception.go deleted file mode 100644 index 6cdd068f..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_exception.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -import ( - "encoding/base64" -) - -// ProtocolException is the thrift protocol exception -type ProtocolException interface { - Exception - TypeID() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type protocolException struct { - typeID int - message string -} - -func (p *protocolException) TypeID() int { - return p.typeID -} - -func (p *protocolException) String() string { - return p.message -} - -func (p *protocolException) Error() string { - return p.message -} - -// NewProtocolException creates a new ProtocolException -func NewProtocolException(err error) ProtocolException { - if err == nil { - return nil - } - if e, ok := err.(ProtocolException); ok { - return e - } - if _, ok := err.(base64.CorruptInputError); ok { - return &protocolException{INVALID_DATA, err.Error()} - } - return &protocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} -} - -// NewProtocolExceptionWithType create a new ProtocolException with an error type -func NewProtocolExceptionWithType(errType int, err error) ProtocolException { - if err == nil { - return nil - } - return &protocolException{errType, err.Error()} -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_factory.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_factory.go deleted file mode 100644 index 7d4ddb99..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/protocol_factory.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -// Factory interface for constructing protocol instances. -type ProtocolFactory interface { - GetProtocol(trans Transport) Protocol -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/rich_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/rich_transport.go deleted file mode 100644 index 2506c4f8..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/rich_transport.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import "io" - -type richTransport struct { - Transport -} - -// Wraps Transport to provide RichTransport interface -func NewRichTransport(trans Transport) RichTransport { - return &richTransport{trans} -} - -func (r *richTransport) ReadByte() (c byte, err error) { - return readByte(r.Transport) -} - -func (r *richTransport) WriteByte(c byte) error { - return writeByte(r.Transport, c) -} - -func (r *richTransport) WriteString(s string) (n int, err error) { - return r.Write([]byte(s)) -} - -func (r *richTransport) RemainingBytes() (num_bytes uint64) { - return r.Transport.RemainingBytes() -} - -func readByte(r io.Reader) (c byte, err error) { - v := [1]byte{0} - n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || err == io.EOF) { - return v[0], nil - } - if n > 0 && err != nil { - return v[0], err - } - if err != nil { - return 0, err - } - return v[0], nil -} - -func writeByte(w io.Writer, c byte) error { - v := [1]byte{c} - _, err := w.Write(v[0:1]) - return err -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/serializer.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/serializer.go deleted file mode 100644 index c8291dd9..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/serializer.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -// A Serializer is used to turn a Struct in to a byte stream -type Serializer struct { - Transport *MemoryBuffer - Protocol Protocol -} - -// WritableStruct is an interface used to encapsulate a message that can be written to a protocol -type WritableStruct interface { - Write(p Protocol) error -} - -// Struct is the interface used to encapsulate a message that can be read and written to a protocol -type Struct interface { - Write(p Protocol) error - Read(p Protocol) error -} - -// NewSerializer create a new serializer using the binary protocol -func NewSerializer() *Serializer { - transport := NewMemoryBufferLen(1024) - protocol := NewBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &Serializer{transport, protocol} -} - -// WriteString writes msg to the serializer and returns it as a string -func (s *Serializer) WriteString(msg Struct) (str string, err error) { - s.Transport.Reset() - - if err = msg.Write(s.Protocol); err != nil { - return - } - - if err = s.Protocol.Flush(); err != nil { - return - } - if err = s.Transport.Flush(); err != nil { - return - } - - return s.Transport.String(), nil -} - -// Write writes msg to the serializer and returns it as a byte array -func (s *Serializer) Write(msg Struct) (b []byte, err error) { - s.Transport.Reset() - - if err = msg.Write(s.Protocol); err != nil { - return - } - - if err = s.Protocol.Flush(); err != nil { - return - } - - if err = s.Transport.Flush(); err != nil { - return - } - - b = append(b, s.Transport.Bytes()...) - return -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server.go deleted file mode 100644 index f5e6494e..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import "context" - -// Server is a thrift server -type Server interface { - ProcessorFactoryContext() ProcessorFactoryContext - ServerTransport() ServerTransport - InputTransportFactory() TransportFactory - OutputTransportFactory() TransportFactory - InputProtocolFactory() ProtocolFactory - OutputProtocolFactory() ProtocolFactory - - // Serve starts the server - Serve() error - // ServeContext starts the server, and stops it when the context is cancelled - ServeContext(ctx context.Context) error - // Stop stops the server. This is optional on a per-implementation basis. Not - // all servers are required to be cleanly stoppable. 
- Stop() error -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_options.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_options.go deleted file mode 100644 index 9f223e18..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_options.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "log" - "os" -) - -// ServerOptions is options needed to run a thrift server -type ServerOptions struct { - quit chan struct{} - log *log.Logger - interceptor Interceptor - - serverTransport ServerTransport - inputTransportFactory TransportFactory - outputTransportFactory TransportFactory - inputProtocolFactory ProtocolFactory - outputProtocolFactory ProtocolFactory -} - -// TransportFactories sets both input and output transport factories -func TransportFactories(factory TransportFactory) func(*ServerOptions) { - return func(server *ServerOptions) { - server.inputTransportFactory = factory - server.outputTransportFactory = factory - } -} - -// InputTransportFactory sets the input transport factory -func InputTransportFactory(factory TransportFactory) func(*ServerOptions) { - return func(server *ServerOptions) { - server.inputTransportFactory = factory - } -} - -// OutputTransportFactory sets the output transport factory -func OutputTransportFactory(factory TransportFactory) func(*ServerOptions) { - return func(server *ServerOptions) { - server.outputTransportFactory = factory - } -} - -// ProtocolFactories sets both input and output protocol factories -func ProtocolFactories(factory ProtocolFactory) func(*ServerOptions) { - return func(server *ServerOptions) { - server.inputProtocolFactory = factory - server.outputProtocolFactory = factory - } -} - -// InputProtocolFactory sets the input protocol factory -func InputProtocolFactory(factory ProtocolFactory) func(*ServerOptions) { - return func(server *ServerOptions) { - server.inputProtocolFactory = factory - } -} - -// OutputProtocolFactory sets the output protocol factory -func OutputProtocolFactory(factory ProtocolFactory) func(*ServerOptions) { - return func(server *ServerOptions) { - server.outputProtocolFactory = factory - } -} - -// Logger sets the logger used for the server -func Logger(log *log.Logger) func(*ServerOptions) { - return func(server *ServerOptions) { - server.log = log - } -} - -// WithInterceptor sets the interceptor for the server -func WithInterceptor(interceptor Interceptor) func(*ServerOptions) { - return func(server *ServerOptions) { - server.interceptor = interceptor - } -} - -func defaultServerOptions(serverTransport ServerTransport) *ServerOptions { - return &ServerOptions{ - serverTransport: 
serverTransport, - inputTransportFactory: NewTransportFactory(), - outputTransportFactory: NewTransportFactory(), - inputProtocolFactory: NewBinaryProtocolFactoryDefault(), - outputProtocolFactory: NewBinaryProtocolFactoryDefault(), - quit: make(chan struct{}, 1), - log: log.New(os.Stderr, "", log.LstdFlags), - } -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_socket.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_socket.go deleted file mode 100644 index fa86d9cd..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_socket.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "net" - "sync" - "time" -) - -type ServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - - // Protects the interrupted value to make it thread safe. - mu sync.RWMutex - interrupted bool -} - -func NewServerSocket(listenAddr string) (*ServerSocket, error) { - return NewServerSocketTimeout(listenAddr, 0) -} - -func NewServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*ServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &ServerSocket{addr: addr, clientTimeout: clientTimeout}, nil -} - -func (p *ServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := net.Listen(p.addr.Network(), p.addr.String()) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *ServerSocket) Accept() (Transport, error) { - p.mu.RLock() - interrupted := p.interrupted - p.mu.RUnlock() - - if interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTransportExceptionFromError(err) - } - return NewSocket(SocketConn(conn), SocketTimeout(p.clientTimeout)) -} - -// Checks whether the socket is listening. -func (p *ServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *ServerSocket) Open() error { - if p.IsListening() { - return NewTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *ServerSocket) Addr() net.Addr { - if p.listener != nil { - return p.listener.Addr() - } - return p.addr -} - -func (p *ServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *ServerSocket) Interrupt() error { - p.mu.Lock() - p.interrupted = true - p.Close() - p.mu.Unlock() - - return nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_transport.go deleted file mode 100644 index 7ba69717..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/server_transport.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -// Server transport. Object which provides client transports. -type ServerTransport interface { - Listen() error - Accept() (Transport, error) - Close() error - - // Optional method implementation. This signals to the server transport - // that it should break out of any accept() or listen() that it is currently - // blocked on. This method, if implemented, MUST be thread safe, as it may - // be called from a different thread context than the other ServerTransport - // methods. - Interrupt() error -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_json_protocol.go deleted file mode 100644 index 24798489..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_json_protocol.go +++ /dev/null @@ -1,1394 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "math" - "strconv" -) - -type _ParseContext int - -const ( - _CONTEXT_IN_TOPLEVEL _ParseContext = 1 - _CONTEXT_IN_LIST_FIRST _ParseContext = 2 - _CONTEXT_IN_LIST _ParseContext = 3 - _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 - _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 -) - -func (p _ParseContext) String() string { - switch p { - case _CONTEXT_IN_TOPLEVEL: - return "TOPLEVEL" - case _CONTEXT_IN_LIST_FIRST: - return "LIST-FIRST" - case _CONTEXT_IN_LIST: - return "LIST" - case _CONTEXT_IN_OBJECT_FIRST: - return "OBJECT-FIRST" - case _CONTEXT_IN_OBJECT_NEXT_KEY: - return "OBJECT-NEXT-KEY" - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - return "OBJECT-NEXT-VALUE" - } - return "UNKNOWN-PARSE-CONTEXT" -} - -// JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured JSONProtocol. -// -type SimpleJSONProtocol struct { - trans Transport - - parseContextStack []int - dumpContext []int - - writer *bufio.Writer - reader *bufio.Reader -} - -// Constructor -func NewSimpleJSONProtocol(t Transport) *SimpleJSONProtocol { - v := &SimpleJSONProtocol{trans: t, - writer: bufio.NewWriter(t), - reader: bufio.NewReader(t), - } - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) - return v -} - -// Factory -type SimpleJSONProtocolFactory struct{} - -func (p *SimpleJSONProtocolFactory) GetProtocol(trans Transport) Protocol { - return NewSimpleJSONProtocol(trans) -} - -func NewSimpleJSONProtocolFactory() *SimpleJSONProtocolFactory { - return &SimpleJSONProtocolFactory{} -} - -var ( - JSON_COMMA []byte - JSON_COLON []byte - JSON_LBRACE []byte - JSON_RBRACE []byte - JSON_LBRACKET []byte - JSON_RBRACKET []byte - JSON_QUOTE byte - JSON_QUOTE_BYTES []byte - JSON_NULL []byte - JSON_TRUE []byte - JSON_FALSE []byte - JSON_INFINITY string - JSON_NEGATIVE_INFINITY string - JSON_NAN string - JSON_INFINITY_BYTES []byte - JSON_NEGATIVE_INFINITY_BYTES []byte - JSON_NAN_BYTES []byte - json_nonbase_map_elem_bytes []byte -) - -func init() { - JSON_COMMA = []byte{','} - JSON_COLON = []byte{':'} - JSON_LBRACE = []byte{'{'} - JSON_RBRACE = []byte{'}'} - JSON_LBRACKET = []byte{'['} - JSON_RBRACKET = []byte{']'} - JSON_QUOTE = '"' - JSON_QUOTE_BYTES = []byte{'"'} - JSON_NULL = []byte{'n', 'u', 'l', 'l'} - JSON_TRUE = []byte{'t', 'r', 'u', 'e'} - JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} - JSON_INFINITY = "Infinity" - JSON_NEGATIVE_INFINITY = "-Infinity" - JSON_NAN = "NaN" - JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NAN_BYTES = []byte{'N', 'a', 'N'} - json_nonbase_map_elem_bytes = []byte{']', ',', '['} -} - -func jsonQuote(s string) string { - b, _ := json.Marshal(s) - s1 := string(b) - return s1 -} - -func jsonUnquote(s string) (string, bool) { - s1 := new(string) - err := json.Unmarshal([]byte(s), s1) - return *s1, err == nil -} - -func mismatch(expected, actual string) error { - return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) -} - -func (p *SimpleJSONProtocol) WriteMessageBegin(name 
string, typeId MessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteString(name); e != nil { - return e - } - if e := p.WriteByte(byte(typeId)); e != nil { - return e - } - if e := p.WriteI32(seqId); e != nil { - return e - } - return nil -} - -func (p *SimpleJSONProtocol) WriteMessageEnd() error { - return p.OutputListEnd() -} - -func (p *SimpleJSONProtocol) WriteStructBegin(name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *SimpleJSONProtocol) WriteStructEnd() error { - return p.OutputObjectEnd() -} - -func (p *SimpleJSONProtocol) WriteFieldBegin(name string, typeId Type, id int16) error { - if e := p.WriteString(name); e != nil { - return e - } - return nil -} - -func (p *SimpleJSONProtocol) WriteFieldEnd() error { - //return p.OutputListEnd() - return nil -} - -func (p *SimpleJSONProtocol) WriteFieldStop() error { return nil } - -func (p *SimpleJSONProtocol) WriteMapBegin(keyType Type, valueType Type, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(byte(keyType)); e != nil { - return e - } - if e := p.WriteByte(byte(valueType)); e != nil { - return e - } - return p.WriteI32(int32(size)) -} - -func (p *SimpleJSONProtocol) WriteMapEnd() error { - return p.OutputListEnd() -} - -func (p *SimpleJSONProtocol) WriteListBegin(elemType Type, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *SimpleJSONProtocol) WriteListEnd() error { - return p.OutputListEnd() -} - -func (p *SimpleJSONProtocol) WriteSetBegin(elemType Type, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *SimpleJSONProtocol) WriteSetEnd() error { - return p.OutputListEnd() -} - -func (p *SimpleJSONProtocol) WriteBool(b bool) error { - return p.OutputBool(b) -} - -func (p *SimpleJSONProtocol) WriteByte(b byte) error { - return p.WriteI32(int32(b)) -} - -func (p *SimpleJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) -} - -func (p *SimpleJSONProtocol) WriteI32(v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *SimpleJSONProtocol) WriteI64(v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *SimpleJSONProtocol) WriteDouble(v float64) error { - return p.OutputF64(v) -} - -func (p *SimpleJSONProtocol) WriteFloat(v float32) error { - return p.OutputF32(v) -} - -func (p *SimpleJSONProtocol) WriteString(v string) error { - return p.OutputString(v) -} - -func (p *SimpleJSONProtocol) WriteBinary(v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. 
-func (p *SimpleJSONProtocol) ReadMessageBegin() (name string, typeId MessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - if name, err = p.ReadString(); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte() - typeId = MessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *SimpleJSONProtocol) ReadMessageEnd() error { - return p.ParseListEnd() -} - -func (p *SimpleJSONProtocol) ReadStructBegin() (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *SimpleJSONProtocol) ReadStructEnd() error { - return p.ParseObjectEnd() -} - -func (p *SimpleJSONProtocol) ReadFieldBegin() (string, Type, int16, error) { - if err := p.ParsePreValue(); err != nil { - return "", STOP, 0, err - } - b, _ := p.reader.Peek(1) - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return "", STOP, 0, nil - case JSON_QUOTE: - p.reader.ReadByte() - name, err := p.ParseStringBody() - // simplejson is not meant to be read back into thrift - // - see http://wiki.apache.org/thrift/ThriftUsageJava - // - use JSON instead - if err != nil { - return name, STOP, 0, err - } - return name, STOP, -1, p.ParsePostValue() - /* - if err = p.ParsePostValue(); err != nil { - return name, STOP, 0, err - } - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, STOP, 0, err - } - bType, err := p.ReadByte() - thetype := Type(bType) - if err != nil { - return name, thetype, 0, err - } - id, err := p.ReadI16() - return name, thetype, id, err - */ - } - e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) - return "", STOP, 0, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return "", STOP, 0, NewProtocolException(io.EOF) -} - -func (p *SimpleJSONProtocol) ReadFieldEnd() error { - return nil - //return p.ParseListEnd() -} - -func (p *SimpleJSONProtocol) ReadMapBegin() (keyType Type, valueType Type, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - bKeyType, e := p.ReadByte() - keyType = Type(bKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - bValueType, e := p.ReadByte() - valueType = Type(bValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64() - size = int(iSize) - return keyType, valueType, size, err -} - -func (p *SimpleJSONProtocol) ReadMapEnd() error { - return p.ParseListEnd() -} - -func (p *SimpleJSONProtocol) ReadListBegin() (elemType Type, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *SimpleJSONProtocol) ReadListEnd() error { - return p.ParseListEnd() -} - -func (p *SimpleJSONProtocol) ReadSetBegin() (elemType Type, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *SimpleJSONProtocol) ReadSetEnd() error { - return p.ParseListEnd() -} - -func (p *SimpleJSONProtocol) ReadBool() (bool, error) { - var value bool - - if err := p.ParsePreValue(); err != nil { - return value, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 { - switch f[0] { - case JSON_TRUE[0]: - b := make([]byte, len(JSON_TRUE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewProtocolException(err) - } - if string(b) == string(JSON_TRUE) { - value = 
true - } else { - e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) - return value, NewProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_FALSE[0]: - b := make([]byte, len(JSON_FALSE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewProtocolException(err) - } - if string(b) == string(JSON_FALSE) { - value = false - } else { - e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) - return value, NewProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_NULL[0]: - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewProtocolException(err) - } - if string(b) == string(JSON_NULL) { - value = false - } else { - e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) - return value, NewProtocolExceptionWithType(INVALID_DATA, e) - } - default: - e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) - return value, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } - return value, p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) ReadByte() (byte, error) { - v, err := p.ReadI64() - return byte(v), err -} - -func (p *SimpleJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() - return int16(v), err -} - -func (p *SimpleJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() - return int32(v), err -} - -func (p *SimpleJSONProtocol) ReadI64() (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *SimpleJSONProtocol) ReadDouble() (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *SimpleJSONProtocol) ReadFloat() (float32, error) { - v, _, err := p.ParseF32() - return v, err -} - -func (p *SimpleJSONProtocol) ReadString() (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) ReadBinary() ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) Flush() (err error) { - return 
NewProtocolException(p.writer.Flush()) -} - -func (p *SimpleJSONProtocol) Skip(fieldType Type) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *SimpleJSONProtocol) Transport() Transport { - return p.trans -} - -func (p *SimpleJSONProtocol) OutputPreValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) - switch cxt { - case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: - if _, e := p.write(JSON_COMMA); e != nil { - return NewProtocolException(e) - } - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if _, e := p.write(JSON_COLON); e != nil { - return NewProtocolException(e) - } - break - } - return nil -} - -func (p *SimpleJSONProtocol) OutputPostValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) - break - case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break - } - return nil -} - -func (p *SimpleJSONProtocol) OutputBool(value bool) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if value { - v = string(JSON_TRUE) - } else { - v = string(JSON_FALSE) - } - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - default: - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *SimpleJSONProtocol) OutputNull() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_NULL); e != nil { - return NewProtocolException(e) - } - return p.OutputPostValue() -} - -func (p *SimpleJSONProtocol) OutputF64(value float64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(value) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(value, 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(value, -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - v = strconv.FormatFloat(value, 'g', -1, 64) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - default: - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *SimpleJSONProtocol) OutputF32(value float32) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(float64(value)) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(float64(value), 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(float64(value), -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - v = strconv.FormatFloat(float64(value), 'g', -1, 32) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, 
_CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - default: - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *SimpleJSONProtocol) OutputI64(value int64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - v := strconv.FormatInt(value, 10) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - default: - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *SimpleJSONProtocol) OutputString(s string) error { - if e := p.OutputPreValue(); e != nil { - return e - } - if e := p.OutputStringData(jsonQuote(s)); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *SimpleJSONProtocol) OutputStringData(s string) error { - _, e := p.write([]byte(s)) - return NewProtocolException(e) -} - -func (p *SimpleJSONProtocol) OutputObjectBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACE); e != nil { - return NewProtocolException(e) - } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) - return nil -} - -func (p *SimpleJSONProtocol) OutputObjectEnd() error { - if _, e := p.write(JSON_RBRACE); e != nil { - return NewProtocolException(e) - } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *SimpleJSONProtocol) OutputListBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACKET); e != nil { - return NewProtocolException(e) - } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) - return nil -} - -func (p *SimpleJSONProtocol) OutputListEnd() error { - if _, e := p.write(JSON_RBRACKET); e != nil { - return NewProtocolException(e) - } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *SimpleJSONProtocol) OutputElemListBegin(elemType Type, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(byte(elemType)); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *SimpleJSONProtocol) ParsePreValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewProtocolException(e) - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - b, _ := p.reader.Peek(1) - switch cxt { - case _CONTEXT_IN_LIST: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACKET[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) - return NewProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - case _CONTEXT_IN_OBJECT_NEXT_KEY: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) - return NewProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if len(b) > 0 { - switch b[0] { - case JSON_COLON[0]: - 
p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) - return NewProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - } - return nil -} - -func (p *SimpleJSONProtocol) ParsePostValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewProtocolException(e) - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) - break - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break - } - return nil -} - -func (p *SimpleJSONProtocol) readNonSignificantWhitespace() error { - for { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return nil - } - switch b[0] { - case ' ', '\r', '\n', '\t': - p.reader.ReadByte() - continue - default: - break - } - break - } - return nil -} - -func (p *SimpleJSONProtocol) ParseStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - v, ok := jsonUnquote(string(JSON_QUOTE) + line) - if !ok { - return "", NewProtocolException(err) - } - return v, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewProtocolException(err) - } - str := string(JSON_QUOTE) + line + s - v, ok := jsonUnquote(str) - if !ok { - e := fmt.Errorf("Unable to parse as JSON string %s", str) - return "", NewProtocolExceptionWithType(INVALID_DATA, e) - } - return v, nil -} - -func (p *SimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewProtocolException(err) - } - v := line + s - return v, nil -} - -func (p *SimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { - line, err := p.reader.ReadBytes(JSON_QUOTE) - if err != nil { - return line, NewProtocolException(err) - } - line2 := line[0 : len(line)-1] - l := len(line2) - if (l % 4) != 0 { - pad := 4 - (l % 4) - fill := [...]byte{'=', '=', '='} - line2 = append(line2, fill[:pad]...) 
- l = len(line2) - } - output := make([]byte, base64.StdEncoding.DecodedLen(l)) - n, err := base64.StdEncoding.Decode(output, line2) - return output[0:n], NewProtocolException(err) -} - -func (p *SimpleJSONProtocol) ParseI64() (int64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value int64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Int64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) ParseF64() (float64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) ParseF32() (float32, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float32 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float32() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) ParseObjectStart() (bool, error) { - if err := p.ParsePreValue(); err != nil { - return false, err - } - var b []byte - b, err := p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) > 0 && b[0] == JSON_LBRACE[0] { - p.reader.ReadByte() - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) - return false, nil - } else if p.safePeekContains(JSON_NULL) { - return true, nil - } - e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) - return false, NewProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *SimpleJSONProtocol) ParseObjectEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { - e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) - return NewProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACE[0]) - if err != nil { - return NewProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) - return NewProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', '}': - break - } - } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - return p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { - if e := p.ParsePreValue(); e != nil { - return false, e - } - var b []byte - b, err = p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) - p.reader.ReadByte() - isNull = false - } else if p.safePeekContains(JSON_NULL) { - isNull 
= true - } else { - err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) - } - return isNull, NewProtocolExceptionWithType(INVALID_DATA, err) -} - -func (p *SimpleJSONProtocol) ParseElemListBegin() (elemType Type, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - bElemType, err := p.ReadByte() - elemType = Type(bElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *SimpleJSONProtocol) ParseListEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - if cxt != _CONTEXT_IN_LIST { - e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) - return NewProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACKET[0]) - if err != nil { - return NewProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of list \"]\", but found: %q", line) - return NewProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): - break - } - } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { - return nil - } - return p.ParsePostValue() -} - -func (p *SimpleJSONProtocol) readSingleValue() (interface{}, Type, error) { - e := p.readNonSignificantWhitespace() - if e != nil { - return nil, VOID, NewProtocolException(e) - } - b, e := p.reader.Peek(1) - if len(b) > 0 { - c := b[0] - switch c { - case JSON_NULL[0]: - buf := make([]byte, len(JSON_NULL)) - _, e := p.reader.Read(buf) - if e != nil { - return nil, VOID, NewProtocolException(e) - } - if string(JSON_NULL) != string(buf) { - e = mismatch(string(JSON_NULL), string(buf)) - return nil, VOID, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return nil, VOID, nil - case JSON_QUOTE: - p.reader.ReadByte() - v, e := p.ParseStringBody() - if e != nil { - return v, UTF8, NewProtocolException(e) - } - if v == JSON_INFINITY { - return INFINITY, DOUBLE, nil - } else if v == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY, DOUBLE, nil - } else if v == JSON_NAN { - return NAN, DOUBLE, nil - } - return v, UTF8, nil - case JSON_TRUE[0]: - buf := make([]byte, len(JSON_TRUE)) - _, e := p.reader.Read(buf) - if e != nil { - return true, BOOL, NewProtocolException(e) - } - if string(JSON_TRUE) != string(buf) { - e := mismatch(string(JSON_TRUE), string(buf)) - return true, BOOL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return true, BOOL, nil - case JSON_FALSE[0]: - buf := make([]byte, len(JSON_FALSE)) - _, e := p.reader.Read(buf) - if e != nil { - return false, BOOL, NewProtocolException(e) - } - if string(JSON_FALSE) != string(buf) { - e := mismatch(string(JSON_FALSE), string(buf)) - return false, BOOL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return false, BOOL, nil - case JSON_LBRACKET[0]: - _, e := p.reader.ReadByte() - return make([]interface{}, 0), LIST, NewProtocolException(e) - case JSON_LBRACE[0]: - _, e := p.reader.ReadByte() - return make(map[string]interface{}), STRUCT, NewProtocolException(e) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: - // assume numeric - v, e := p.readNumeric() - return v, DOUBLE, e - default: - e := 
fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) - return nil, VOID, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } - e = fmt.Errorf("Cannot read a single element while parsing JSON.") - return nil, VOID, NewProtocolExceptionWithType(INVALID_DATA, e) - -} - -func (p *SimpleJSONProtocol) readIfNull() (bool, error) { - cont := true - for cont { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return false, nil - } - switch b[0] { - default: - return false, nil - case JSON_NULL[0]: - cont = false - break - case ' ', '\n', '\r', '\t': - p.reader.ReadByte() - break - } - } - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - return true, nil - } - return false, nil -} - -func (p *SimpleJSONProtocol) readQuoteIfNext() { - b, _ := p.reader.Peek(1) - if len(b) > 0 && b[0] == JSON_QUOTE { - p.reader.ReadByte() - } -} - -func (p *SimpleJSONProtocol) readNumeric() (Numeric, error) { - isNull, err := p.readIfNull() - if isNull || err != nil { - return NUMERIC_NULL, err - } - hasDecimalPoint := false - nextCanBeSign := true - hasE := false - MAX_LEN := 40 - buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) - continueFor := true - inQuotes := false - for continueFor { - c, err := p.reader.ReadByte() - if err != nil { - if err == io.EOF { - break - } - return NUMERIC_NULL, NewProtocolException(err) - } - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - buf.WriteByte(c) - nextCanBeSign = false - case '.': - if hasDecimalPoint { - e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - if hasE { - e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasDecimalPoint, nextCanBeSign = true, false - case 'e', 'E': - if hasE { - e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasE, nextCanBeSign = true, true - case '-', '+': - if !nextCanBeSign { - e := fmt.Errorf("Negative sign within number") - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - nextCanBeSign = false - case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: - p.reader.UnreadByte() - continueFor = false - case JSON_NAN[0]: - if buf.Len() == 0 { - buffer := make([]byte, len(JSON_NAN)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewProtocolException(e) - } - if JSON_NAN != string(buffer) { - e := mismatch(JSON_NAN, string(buffer)) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NAN, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_INFINITY[0]: - if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { - buffer := make([]byte, len(JSON_INFINITY)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewProtocolException(e) - } - if JSON_INFINITY != string(buffer) { - e := mismatch(JSON_INFINITY, string(buffer)) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - 
p.readQuoteIfNext() - } - return INFINITY, nil - } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { - buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) - buffer[0] = JSON_NEGATIVE_INFINITY[0] - buffer[1] = c - _, e := p.reader.Read(buffer[2:]) - if e != nil { - return NUMERIC_NULL, NewProtocolException(e) - } - if JSON_NEGATIVE_INFINITY != string(buffer) { - e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NEGATIVE_INFINITY, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_QUOTE: - if !inQuotes { - inQuotes = true - } else { - break - } - default: - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - } - if buf.Len() == 0 { - e := fmt.Errorf("Unable to parse number from empty string ''") - return NUMERIC_NULL, NewProtocolExceptionWithType(INVALID_DATA, e) - } - return NewNumericFromJSONString(buf.String(), false), nil -} - -// Safely peeks into the buffer, reading only what is necessary -func (p *SimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { - a, _ := p.reader.Peek(i + 1) - if len(a) < (i+1) || a[i] != b[i] { - return false - } - } - return true -} - -// Reset the context stack to its initial state. -func (p *SimpleJSONProtocol) resetContextStack() { - p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} - p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} -} - -func (p *SimpleJSONProtocol) write(b []byte) (int, error) { - n, err := p.writer.Write(b) - if err != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - } - return n, err -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_server.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_server.go deleted file mode 100644 index 42285ddc..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/simple_server.go +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "context" - "errors" - "runtime/debug" -) - -// ErrServerClosed is returned by the Serve methods after a call to Stop -var ErrServerClosed = errors.New("thrift: Server closed") - -// SimpleServer is a functional but unoptimized server that is easy to -// understand. 
In its accept loop, it performs an accept on an -// underlying socket, wraps the socket in the ServerTransport, and -// then spins up a gofunc to process requests. -// -// There is one gofunc per active connection that handles all requests -// on the connection. multiple simultaneous requests over a single -// connection are not supported, as the per-connection gofunc reads -// the request, processes it, and writes the response serially -type SimpleServer struct { - processorFactoryContext ProcessorFactoryContext - configurableRequestProcessor func(ctx context.Context, client Transport) error - *ServerOptions -} - -// NewSimpleServer create a new server -func NewSimpleServer(processor Processor, serverTransport ServerTransport, options ...func(*ServerOptions)) *SimpleServer { - return NewSimpleServerContext(NewProcessorContextAdapter(processor), serverTransport, options...) -} - -// NewSimpleServerContext creates a new server that supports contexts -func NewSimpleServerContext(processor ProcessorContext, serverTransport ServerTransport, options ...func(*ServerOptions)) *SimpleServer { - return NewSimpleServerFactoryContext(NewProcessorFactoryContext(processor), serverTransport, options...) -} - -// NewSimpleServer2 is deprecated, used NewSimpleServer instead -func NewSimpleServer2(processor Processor, serverTransport ServerTransport) *SimpleServer { - return NewSimpleServerFactory(NewProcessorFactory(processor), serverTransport) -} - -// NewSimpleServer4 is deprecated, used NewSimpleServer instead -func NewSimpleServer4(processor Processor, serverTransport ServerTransport, transportFactory TransportFactory, protocolFactory ProtocolFactory) *SimpleServer { - return NewSimpleServerFactory( - NewProcessorFactory(processor), - serverTransport, - TransportFactories(transportFactory), - ProtocolFactories(protocolFactory), - ) -} - -// NewSimpleServer6 is deprecated, used NewSimpleServer instead -func NewSimpleServer6(processor Processor, serverTransport ServerTransport, inputTransportFactory TransportFactory, outputTransportFactory TransportFactory, inputProtocolFactory ProtocolFactory, outputProtocolFactory ProtocolFactory) *SimpleServer { - return NewSimpleServerFactory( - NewProcessorFactory(processor), - serverTransport, - InputTransportFactory(inputTransportFactory), - OutputTransportFactory(outputTransportFactory), - InputProtocolFactory(inputProtocolFactory), - OutputProtocolFactory(outputProtocolFactory), - ) -} - -// NewSimpleServerFactory create a new server factory -func NewSimpleServerFactory(processorFactory ProcessorFactory, serverTransport ServerTransport, options ...func(*ServerOptions)) *SimpleServer { - return NewSimpleServerFactoryContext(NewProcessorFactoryContextAdapter(processorFactory), serverTransport, options...) -} - -// NewSimpleServerFactoryContext creates a new server factory that supports contexts. 
-func NewSimpleServerFactoryContext(processorFactoryContext ProcessorFactoryContext, serverTransport ServerTransport, options ...func(*ServerOptions)) *SimpleServer { - return &SimpleServer{ - processorFactoryContext: processorFactoryContext, - ServerOptions: simpleServerOptions(serverTransport, options...), - } -} - -func simpleServerOptions(t ServerTransport, options ...func(*ServerOptions)) *ServerOptions { - opts := defaultServerOptions(t) - for _, option := range options { - option(opts) - } - return opts -} - -// NewSimpleServerFactory2 is deprecated, used NewSimpleServerFactory instead -func NewSimpleServerFactory2(processorFactory ProcessorFactory, serverTransport ServerTransport) *SimpleServer { - return NewSimpleServerFactory(processorFactory, serverTransport) -} - -// NewSimpleServerFactory4 is deprecated, used NewSimpleServerFactory instead -func NewSimpleServerFactory4(processorFactory ProcessorFactory, serverTransport ServerTransport, transportFactory TransportFactory, protocolFactory ProtocolFactory) *SimpleServer { - return NewSimpleServerFactory( - processorFactory, - serverTransport, - TransportFactories(transportFactory), - ProtocolFactories(protocolFactory), - ) -} - -// NewSimpleServerFactory6 is deprecated, used NewSimpleServerFactory instead -func NewSimpleServerFactory6(processorFactory ProcessorFactory, serverTransport ServerTransport, inputTransportFactory TransportFactory, outputTransportFactory TransportFactory, inputProtocolFactory ProtocolFactory, outputProtocolFactory ProtocolFactory) *SimpleServer { - return NewSimpleServerFactory( - processorFactory, - serverTransport, - InputTransportFactory(inputTransportFactory), - OutputTransportFactory(outputTransportFactory), - InputProtocolFactory(inputProtocolFactory), - OutputProtocolFactory(outputProtocolFactory), - ) -} - -// ProcessorFactoryContext returns the processor factory that supports contexts -func (p *SimpleServer) ProcessorFactoryContext() ProcessorFactoryContext { - return p.processorFactoryContext -} - -// ServerTransport returns the server transport -func (p *SimpleServer) ServerTransport() ServerTransport { - return p.serverTransport -} - -// InputTransportFactory returns the input transport factory -func (p *SimpleServer) InputTransportFactory() TransportFactory { - return p.inputTransportFactory -} - -// OutputTransportFactory returns the output transport factory -func (p *SimpleServer) OutputTransportFactory() TransportFactory { - return p.outputTransportFactory -} - -// InputProtocolFactory returns the input protocolfactory -func (p *SimpleServer) InputProtocolFactory() ProtocolFactory { - return p.inputProtocolFactory -} - -// OutputProtocolFactory returns the output protocol factory -func (p *SimpleServer) OutputProtocolFactory() ProtocolFactory { - return p.outputProtocolFactory -} - -// Listen returns the server transport listener -func (p *SimpleServer) Listen() error { - return p.serverTransport.Listen() -} - -// AcceptLoop runs the accept loop to handle requests -func (p *SimpleServer) AcceptLoop() error { - return p.AcceptLoopContext(context.Background()) -} - -// AcceptLoopContext is an AcceptLoop that supports contexts. -// The context is decorated with ConnInfo and passed down to new clients. 
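A minimal usage sketch of the SimpleServer being removed in this hunk, for orientation while reading the deleted code. NewSimpleServer, ServeContext and Stop come from the surrounding file; the thrift.NewServerSocket constructor, the ":9090" listen address and the way the processor is obtained are illustrative assumptions, not part of this patch.

package main

import (
	"context"
	"log"
	"os"
	"os/signal"

	"github.com/facebook/fbthrift/thrift/lib/go/thrift"
)

// runServer wires a processor (normally produced by fbthrift-generated code)
// to a listening transport and serves until the context is cancelled.
func runServer(ctx context.Context, processor thrift.Processor) error {
	// Assumed helper: a plain TCP ServerTransport listening on :9090.
	transport, err := thrift.NewServerSocket(":9090")
	if err != nil {
		return err
	}
	srv := thrift.NewSimpleServer(processor, transport)
	// ServeContext listens, runs the accept loop and stops when ctx is done.
	return srv.ServeContext(ctx)
}

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()
	var processor thrift.Processor // placeholder; comes from generated bindings
	if err := runServer(ctx, processor); err != nil && err != context.Canceled {
		log.Fatal(err)
	}
}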
-func (p *SimpleServer) AcceptLoopContext(ctx context.Context) error { - for { - client, err := p.serverTransport.Accept() - if err != nil { - select { - case <-p.quit: - return ErrServerClosed - default: - } - return err - } - if client == nil { - continue - } - go func(ctx context.Context, client Transport) { - ctx = p.addConnInfo(ctx, client) - if err := p.processRequests(ctx, client); err != nil { - p.log.Println("thrift: error processing request:", err) - } - }(ctx, client) - } -} - -func (p *SimpleServer) addConnInfo(ctx context.Context, client Transport) context.Context { - if p.processorFactoryContext == nil { - return ctx - } - s, ok := client.(*Socket) - if !ok { - return ctx - } - ctx = context.WithValue(ctx, connInfoKey, ConnInfo{ - LocalAddr: s.Conn().LocalAddr(), - RemoteAddr: s.Conn().RemoteAddr(), - netConn: s.Conn(), - }) - return ctx -} - -// Serve starts listening on the transport and accepting new connections -// and blocks until Stop is called or an error occurs. -func (p *SimpleServer) Serve() error { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - return p.ServeContext(ctx) -} - -// ServeContext behaves like Serve but supports cancellation via context. -func (p *SimpleServer) ServeContext(ctx context.Context) error { - err := p.Listen() - if err != nil { - return err - } - go func() { - <-ctx.Done() - p.Stop() - }() - err = p.AcceptLoopContext(ctx) - if ctx.Err() != nil { - return ctx.Err() - } - return err -} - -// Stop stops the server -func (p *SimpleServer) Stop() error { - p.quit <- struct{}{} - p.serverTransport.Interrupt() - return nil -} - -func (p *SimpleServer) processRequests(ctx context.Context, client Transport) error { - if p.configurableRequestProcessor != nil { - return p.configurableRequestProcessor(ctx, client) - } - - processor := p.processorFactoryContext.GetProcessorContext(client) - var ( - inputTransport, outputTransport Transport - inputProtocol, outputProtocol Protocol - ) - - inputTransport = p.inputTransportFactory.GetTransport(client) - - // Special case for Header, it requires that the transport/protocol for - // input/output is the same object (to track session state). - if _, ok := inputTransport.(*HeaderTransport); ok { - outputTransport = nil - inputProtocol = p.inputProtocolFactory.GetProtocol(inputTransport) - outputProtocol = inputProtocol - } else { - outputTransport = p.outputTransportFactory.GetTransport(client) - inputProtocol = p.inputProtocolFactory.GetProtocol(inputTransport) - outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) - } - - defer func() { - if err := recover(); err != nil { - p.log.Printf("panic in processor: %v: %s", err, debug.Stack()) - } - }() - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - intProcessor := WrapInterceptorContext(p.interceptor, processor) - for { - keepOpen, exc := ProcessContext(ctx, intProcessor, inputProtocol, outputProtocol) - if exc != nil { - outputProtocol.Flush() - return exc - } - if !keepOpen { - break - } - } - - // graceful exit. client closed connection - return nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/socket.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/socket.go deleted file mode 100644 index 0775fef4..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/socket.go +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. 
- * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "errors" - "net" - "time" -) - -type Socket struct { - conn net.Conn - addr net.Addr - timeout time.Duration -} - -// SocketOption is the type used to set options on the socket -type SocketOption func(*Socket) error - -// SocketTimeout sets the timeout -func SocketTimeout(timeout time.Duration) SocketOption { - return func(socket *Socket) error { - socket.timeout = timeout - return nil - } -} - -// SocketAddr sets the socket address -func SocketAddr(hostPort string) SocketOption { - return func(socket *Socket) error { - addr, err := net.ResolveTCPAddr("tcp6", hostPort) - if err != nil { - addr, err = net.ResolveTCPAddr("tcp", hostPort) - if err != nil { - return err - } - } - socket.addr = addr - return nil - } -} - -// SocketConn sets the socket connection -func SocketConn(conn net.Conn) SocketOption { - return func(socket *Socket) error { - socket.conn = conn - socket.addr = conn.RemoteAddr() - return nil - } -} - -// NewSocket creates a net.Conn-backed Transport, given a host and port, -// or an existing connection. -// trans, err := thrift.NewSocket(thrift.SocketAddr("localhost:9090")) -func NewSocket(options ...SocketOption) (*Socket, error) { - socket := &Socket{} - - for _, option := range options { - err := option(socket) - if err != nil { - return nil, err - } - } - - if socket.addr.String() == "" && socket.conn.RemoteAddr().String() == "" { - return nil, errors.New("must supply either an address or a connection") - } - - return socket, nil -} - -// Sets the socket timeout -func (p *Socket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *Socket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. 
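A short sketch of how the Socket transport deleted in this hunk was typically constructed on the client side, using only the functional options and methods visible here (NewSocket, SocketAddr, SocketTimeout, Open, IsOpen, Close); the localhost:9090 address is an illustrative assumption.

package main

import (
	"log"
	"time"

	"github.com/facebook/fbthrift/thrift/lib/go/thrift"
)

func main() {
	// Build the transport from functional options, then open it explicitly;
	// the timeout covers the connect as well as per-call read/write deadlines.
	sock, err := thrift.NewSocket(
		thrift.SocketAddr("localhost:9090"),
		thrift.SocketTimeout(5*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := sock.Open(); err != nil {
		log.Fatal(err)
	}
	defer sock.Close()
	log.Println("socket open:", sock.IsOpen())
}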
-func (p *Socket) Open() error { - if p.IsOpen() { - return NewTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { - return NewTransportException(NOT_OPEN, err.Error()) - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *Socket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *Socket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. -func (p *Socket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -//Returns the remote address of the socket. -func (p *Socket) Addr() net.Addr { - return p.addr -} - -func (p *Socket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTransportExceptionFromError(err) -} - -func (p *Socket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *Socket) Flush() error { - return nil -} - -func (p *Socket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *Socket) RemainingBytes() (num_bytes uint64) { - return UnknownRemaining // the truth is, we just don't know unless framed is used -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_server_socket.go deleted file mode 100644 index 23350f78..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_server_socket.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -import ( - "net" - "time" - "crypto/tls" -) - -type SSLServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - interrupted bool - cfg *tls.Config -} - -func NewSSLServerSocket(listenAddr string, cfg *tls.Config) (*SSLServerSocket, error) { - return NewSSLServerSocketTimeout(listenAddr, cfg, 0) -} - -func NewSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*SSLServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &SSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil -} - -func (p *SSLServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *SSLServerSocket) Accept() (Transport, error) { - if p.interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTransportExceptionFromError(err) - } - return NewSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *SSLServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. -func (p *SSLServerSocket) Open() error { - if p.IsListening() { - return NewTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *SSLServerSocket) Addr() net.Addr { - return p.addr -} - -func (p *SSLServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *SSLServerSocket) Interrupt() error { - p.interrupted = true - return nil -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_socket.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_socket.go deleted file mode 100644 index d09ad581..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "crypto/tls" - "net" - "time" -) - -type SSLSocket struct { - conn net.Conn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. 
- hostPort string - // addr is nil when hostPort is not "", and is only used when the - // SSLSocket is constructed from a net.Addr. - addr net.Addr - timeout time.Duration - cfg *tls.Config -} - -// NewSSLSocket creates a net.Conn-backed Transport, given a host and port and tls Configuration -// -// Example: -// trans, err := thrift.NewSSLSocket("localhost:9090", nil) -func NewSSLSocket(hostPort string, cfg *tls.Config) (*SSLSocket, error) { - return NewSSLSocketTimeout(hostPort, cfg, 0) -} - -// NewSSLSocketTimeout creates a net.Conn-backed Transport, given a host and port -// it also accepts a tls Configuration and a timeout as a time.Duration -func NewSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*SSLSocket, error) { - return &SSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil -} - -// Creates a SSLSocket from a net.Addr -func NewSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *SSLSocket { - return &SSLSocket{addr: addr, timeout: timeout, cfg: cfg} -} - -// Creates a SSLSocket from an existing net.Conn -func NewSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *SSLSocket { - return &SSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} -} - -// Sets the socket timeout -func (p *SSLSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *SSLSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *SSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil { - return NewTransportException(NOT_OPEN, err.Error()) - } - } else { - if p.IsOpen() { - return NewTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return NewTransportException(NOT_OPEN, err.Error()) - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *SSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *SSLSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. 
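The SSLSocket removed here differs from the plain Socket mainly in that a hostPort plus tls.Config drives certificate verification on dial; a brief sketch under assumed values (host name and port are illustrative, CA handling is left to the default root pool).

package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/facebook/fbthrift/thrift/lib/go/thrift"
)

func main() {
	// ServerName is normally inferred from the host part of hostPort; it is
	// set explicitly here only to make the certificate check visible.
	cfg := &tls.Config{ServerName: "graphd.example.com"}
	sock, err := thrift.NewSSLSocketTimeout("graphd.example.com:9989", cfg, 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	if err := sock.Open(); err != nil {
		log.Fatal(err)
	}
	defer sock.Close()
}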
-func (p *SSLSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -func (p *SSLSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTransportExceptionFromError(err) -} - -func (p *SSLSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *SSLSocket) Flush() error { - return nil -} - -func (p *SSLSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *SSLSocket) RemainingBytes() (num_bytes uint64) { - return UnknownRemaining // the truth is, we just don't know unless framed is used -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport.go deleted file mode 100644 index 72ae8f8e..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "errors" - "io" -) - -var errTransportInterrupted = errors.New("Transport Interrupted") - -// Flusher is the interface that wraps the basic Flush method -type Flusher interface { - Flush() (err error) -} - -// ReadSizeProvider is the interface that wraps the basic RemainingBytes method -type ReadSizeProvider interface { - RemainingBytes() (numBytes uint64) -} - -// Transport is an encapsulation of the I/O layer -type Transport interface { - io.ReadWriteCloser - Flusher - ReadSizeProvider - - // Opens the transport for communication - Open() error - - // Returns true if the transport is open - IsOpen() bool -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// RichTransport is an "enhanced" transport with extra capabilities. -// You need to use one of these to construct protocol. -// Notably, Socket does not implement this interface, and it is always a mistake to use -// Socket directly in protocol. 
-type RichTransport interface { - io.ReadWriter - io.ByteReader - io.ByteWriter - stringWriter - Flusher - ReadSizeProvider -} - -// UnknownRemaining is used by transports that can not return a real answer -// for RemainingBytes() -const UnknownRemaining = ^uint64(0) diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_exception.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_exception.go deleted file mode 100644 index 4b710495..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_exception.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// TransportException is the interface for transport exceptions -type TransportException interface { - Exception - TypeID() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 - INTERRUPTED = 5 - BAD_ARGS = 6 - CORRUPTED_DATA = 7 - NOT_SUPPORTED = 9 - INVALID_STATE = 10 - INVALID_FRAME_SIZE = 11 - SSL_ERROR = 12 - COULD_NOT_BIND = 13 - NETWORK_ERROR = 15 -) - -type transportException struct { - typeID int - err error -} - -func (p *transportException) TypeID() int { - return p.typeID -} - -func (p *transportException) Error() string { - return p.err.Error() -} - -func (p *transportException) Err() error { - return p.err -} - -// NewTransportException creates a new TransportException -func NewTransportException(t int, e string) TransportException { - return &transportException{typeID: t, err: errors.New(e)} -} - -// NewTransportExceptionFromError creates a TransportException from an error -func NewTransportExceptionFromError(e error) TransportException { - if e == nil { - return nil - } - - if t, ok := e.(TransportException); ok { - return t - } - - switch v := e.(type) { - case TransportException: - return v - case timeoutable: - if v.Timeout() { - return &transportException{typeID: TIMED_OUT, err: e} - } - } - - if e == io.EOF { - return &transportException{typeID: END_OF_FILE, err: e} - } - - return &transportException{typeID: UNKNOWN_TRANSPORT_EXCEPTION, err: e} -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_factory.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_factory.go deleted file mode 100644 index 255c489a..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/transport_factory.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -// TransportFactory is used to create wrapped instance of Transports. -// This is used primarily in servers, which get Transports from -// a ServerTransport and then may want to mutate them (i.e. create -// a BufferedTransport from the underlying base transport) -type TransportFactory interface { - GetTransport(trans Transport) Transport -} - -type transportFactory struct{} - -// Return a wrapped instance of the base Transport. -func (p *transportFactory) GetTransport(trans Transport) Transport { - return trans -} - -// NewTransportFactory returns a new TransportFactory -func NewTransportFactory() TransportFactory { - return &transportFactory{} -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/type.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/type.go deleted file mode 100644 index 7933a958..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/type.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -// Type constants in the Thrift protocol -type Type byte - -const ( - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - BINARY = 18 - FLOAT = 19 -) - -var typeNames = map[int]string{ - STOP: "STOP", - VOID: "VOID", - BOOL: "BOOL", - BYTE: "BYTE", - I16: "I16", - I32: "I32", - I64: "I64", - STRING: "STRING", - STRUCT: "STRUCT", - MAP: "MAP", - SET: "SET", - LIST: "LIST", - UTF8: "UTF8", - UTF16: "UTF16", -} - -func (p Type) String() string { - if s, ok := typeNames[int(p)]; ok { - return s - } - return "Unknown" -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zlib_transport.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zlib_transport.go deleted file mode 100644 index 3f175797..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zlib_transport.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package thrift - -import ( - "compress/zlib" - "io" - "log" -) - -// ZlibTransportFactory is a factory for ZlibTransport instances -type ZlibTransportFactory struct { - level int -} - -// ZlibTransport is a Transport implementation that makes use of zlib compression. -type ZlibTransport struct { - reader io.ReadCloser - transport Transport - writer *zlib.Writer -} - -// GetTransport constructs a new instance of NewZlibTransport -func (p *ZlibTransportFactory) GetTransport(trans Transport) Transport { - t, _ := NewZlibTransport(trans, p.level) - return t -} - -// NewZlibTransportFactory constructs a new instance of NewZlibTransportFactory -func NewZlibTransportFactory(level int) *ZlibTransportFactory { - return &ZlibTransportFactory{level: level} -} - -// NewZlibTransport constructs a new instance of ZlibTransport -func NewZlibTransport(trans Transport, level int) (*ZlibTransport, error) { - w, err := zlib.NewWriterLevel(trans, level) - if err != nil { - log.Println(err) - return nil, err - } - - return &ZlibTransport{ - writer: w, - transport: trans, - }, nil -} - -// Close closes the reader and writer (flushing any unwritten data) and closes -// the underlying transport. -func (z *ZlibTransport) Close() error { - if z.reader != nil { - if err := z.reader.Close(); err != nil { - return err - } - } - if err := z.writer.Close(); err != nil { - return err - } - return z.transport.Close() -} - -// Flush flushes the writer and its underlying transport. 
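As a reading aid for the ZlibTransport removed in this hunk, a sketch of wrapping an existing transport so payloads are zlib-compressed; the wrapped Socket and its address are illustrative assumptions, and in real use the underlying transport would be opened before any traffic flows.

package main

import (
	"compress/zlib"
	"log"

	"github.com/facebook/fbthrift/thrift/lib/go/thrift"
)

func main() {
	// Underlying transport to be wrapped (address is illustrative).
	sock, err := thrift.NewSocket(thrift.SocketAddr("localhost:9090"))
	if err != nil {
		log.Fatal(err)
	}

	// Direct wrapping: writes pass through a zlib writer before reaching sock.
	ztrans, err := thrift.NewZlibTransport(sock, zlib.BestCompression)
	if err != nil {
		log.Fatal(err)
	}
	_ = ztrans

	// Factory form, as a server would use to wrap each accepted connection.
	factory := thrift.NewZlibTransportFactory(zlib.BestCompression)
	_ = factory.GetTransport(sock)
}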
-func (z *ZlibTransport) Flush() error { - if err := z.writer.Flush(); err != nil { - return err - } - return z.transport.Flush() -} - -// IsOpen returns true if the transport is open -func (z *ZlibTransport) IsOpen() bool { - return z.transport.IsOpen() -} - -// Open opens the transport for communication -func (z *ZlibTransport) Open() error { - return z.transport.Open() -} - -func (z *ZlibTransport) Read(p []byte) (int, error) { - if z.reader == nil { - r, err := zlib.NewReader(z.transport) - if err != nil { - return 0, NewTransportExceptionFromError(err) - } - z.reader = r - } - - return z.reader.Read(p) -} - -func (z *ZlibTransport) RemainingBytes() uint64 { - return UnknownRemaining // the truth is, we just don't know unless framed is used -} - -func (z *ZlibTransport) Write(p []byte) (int, error) { - return z.writer.Write(p) -} diff --git a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zstd.go b/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zstd.go deleted file mode 100644 index c163a267..00000000 --- a/vendor/github.com/facebook/fbthrift/thrift/lib/go/thrift/zstd.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2019-present Facebook, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package thrift - -import ( - "bytes" - "fmt" -) - -const ( - zstdTransformSupport = false -) - -func zstdRead(rd byteReader) (byteReader, error) { - return nil, fmt.Errorf("zstd compression not supported") -} - -func zstdWriter(tmpbuf *bytes.Buffer, buf *bytes.Buffer) error { - return fmt.Errorf("zstd compression not supported") -} diff --git a/vendor/github.com/vesoft-inc/nebula-go/.editorconfig b/vendor/github.com/vesoft-inc/nebula-go/.editorconfig deleted file mode 100644 index b4195bb8..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go}] -indent_style = tab -indent_size = 2 diff --git a/vendor/github.com/vesoft-inc/nebula-go/.gitignore b/vendor/github.com/vesoft-inc/nebula-go/.gitignore deleted file mode 100644 index 5061dcdf..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -.DS_Store - -.idea/ diff --git a/vendor/github.com/vesoft-inc/nebula-go/README.md b/vendor/github.com/vesoft-inc/nebula-go/README.md deleted file mode 100644 index 737d8736..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# nebula-go - -![build status](https://github.com/vesoft-inc/nebula-go/workflows/build/badge.svg) -![test status](https://github.com/vesoft-inc/nebula-go/workflows/test/badge.svg) - -Official Nebula Go client which communicates with the server using 
[fbthrift](https://github.com/facebook/fbthrift/). - -Please be careful do not to modify the files in the graph directory, these codes were all generated by fbthrift. - -## Install - -```shell -$ go get -u -v github.com/vesoft-inc/nebula-go@master -``` - -If you get a message like `cannot use path@version syntax in GOPATH mode`, see the instructions below for [enabling go modules](#enabling-go-modules). - -## Usage example - -```go -package main - -import ( - "log" - - nebula "github.com/vesoft-inc/nebula-go" - graph "github.com/vesoft-inc/nebula-go/nebula/graph" -) - -func main() { - client, err := nebula.NewClient("127.0.0.1:3699") - if err != nil { - log.Fatal(err) - } - - if err = client.Connect("username", "password"); err != nil { - log.Fatal(err) - } - defer client.Disconnect() - - resp, err := client.Execute("SHOW HOSTS;") - if err != nil { - log.Fatal(err) - } - - if nebula.IsError(resp) { - log.Printf("ErrorCode: %v, ErrorMsg: %s", resp.GetErrorCode(), resp.GetErrorMsg()) - } -} -``` - -## Enabling go modules - -Dependency management tools are built into go 1.11+ in the form of [go modules](https://github.com/golang/go/wiki/Modules). -If you are using go 1.11 or 1.12 and are working with a project located within `$GOPATH`, you need to do: - -```sh -export GO111MODULE=on -``` - -Ensure your project has a `go.mod` file defined at the root of your project. -If you do not already have one, `go mod init` will create one for you: - -```sh -go mod init -``` - -And then try to get dependencies of `github.com/vesoft-inc/nebula-go` in your go module by simply `go get -u -v github.com/vesoft-inc/nebula-go@master`. diff --git a/vendor/github.com/vesoft-inc/nebula-go/client.go b/vendor/github.com/vesoft-inc/nebula-go/client.go deleted file mode 100644 index 34c63f0b..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/client.go +++ /dev/null @@ -1,108 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License, - * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
- */ - -package ngdb - -import ( - "fmt" - "log" - "time" - - "github.com/facebook/fbthrift/thrift/lib/go/thrift" - graph "github.com/vesoft-inc/nebula-go/nebula/graph" -) - -type GraphOptions struct { - Timeout time.Duration -} - -type GraphOption func(*GraphOptions) - -var defaultGraphOptions = GraphOptions{ - Timeout: 30 * time.Second, -} - -type GraphClient struct { - graph graph.GraphServiceClient - option GraphOptions - sessionID int64 -} - -func WithTimeout(duration time.Duration) GraphOption { - return func(options *GraphOptions) { - options.Timeout = duration - } -} - -func NewClient(address string, opts ...GraphOption) (client *GraphClient, err error) { - options := defaultGraphOptions - for _, opt := range opts { - opt(&options) - } - - timeoutOption := thrift.SocketTimeout(options.Timeout) - addressOption := thrift.SocketAddr(address) - sock, err := thrift.NewSocket(timeoutOption, addressOption) - if err != nil { - return nil, err - } - - transport := thrift.NewBufferedTransport(sock, 128<<10) - - pf := thrift.NewBinaryProtocolFactoryDefault() - graph := &GraphClient{ - graph: *graph.NewGraphServiceClientFactory(transport, pf), - } - return graph, nil -} - -// Open transport and authenticate -func (client *GraphClient) Connect(username, password string) error { - if err := client.graph.Transport.Open(); err != nil { - return err - } - - resp, err := client.graph.Authenticate(username, password) - if err != nil { - log.Printf("Authentication fails, %s", err.Error()) - if e := client.graph.Close(); e != nil { - log.Printf("Fail to close transport, error: %s", e.Error()) - } - return err - } - - if resp.GetErrorCode() != graph.ErrorCode_SUCCEEDED { - log.Printf("Authentication fails, ErrorCode: %v, ErrorMsg: %s", resp.GetErrorCode(), resp.GetErrorMsg()) - return fmt.Errorf(resp.GetErrorMsg()) - } - - client.sessionID = resp.GetSessionID() - - return nil -} - -// Signout and close transport -func (client *GraphClient) Disconnect() { - if err := client.graph.Signout(client.sessionID); err != nil { - log.Printf("Fail to signout, error: %s", err.Error()) - } - - if err := client.graph.Close(); err != nil { - log.Printf("Fail to close transport, error: %s", err.Error()) - } -} - -func (client *GraphClient) Execute(stmt string) (*graph.ExecutionResponse, error) { - return client.graph.Execute(client.sessionID, stmt) -} - -func (client *GraphClient) GetSessionID() int64 { - return client.sessionID -} - -func IsError(resp *graph.ExecutionResponse) bool { - return resp.GetErrorCode() != graph.ErrorCode_SUCCEEDED -} diff --git a/vendor/github.com/vesoft-inc/nebula-go/go.mod b/vendor/github.com/vesoft-inc/nebula-go/go.mod deleted file mode 100644 index 43b3ee30..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/vesoft-inc/nebula-go - -require github.com/facebook/fbthrift v0.0.0-20190922225929-2f9839604e25 - -go 1.13 diff --git a/vendor/github.com/vesoft-inc/nebula-go/go.sum b/vendor/github.com/vesoft-inc/nebula-go/go.sum deleted file mode 100644 index b6d3796a..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/facebook/fbthrift v0.0.0-20190922225929-2f9839604e25 h1:dezRDs9oGYxeavyvcNg/Js+dK6kIvfzERoJ7K8Xkv14= -github.com/facebook/fbthrift v0.0.0-20190922225929-2f9839604e25/go.mod h1:2tncLx5rmw69e5kMBv/yJneERbzrr1yr5fdlnTbu8lU= -github.com/facebook/fbthrift v0.31.0 h1:vVZbmPbaHuC58HCKgsYgj8GKe89nC4dPmFI/8kKNXDE= -github.com/facebook/fbthrift v0.31.0/go.mod 
h1:2tncLx5rmw69e5kMBv/yJneERbzrr1yr5fdlnTbu8lU= diff --git a/vendor/github.com/vesoft-inc/nebula-go/nebula/constants.go b/vendor/github.com/vesoft-inc/nebula-go/nebula/constants.go deleted file mode 100644 index 1f0a619e..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/nebula/constants.go +++ /dev/null @@ -1,28 +0,0 @@ -// Autogenerated by Thrift Compiler (facebook) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -// @generated - -package nebula - -import ( - "bytes" - "sync" - "fmt" - thrift "github.com/facebook/fbthrift/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = sync.Mutex{} -var _ = bytes.Equal - -var KInvalidValueType *ValueType - -func init() { -KInvalidValueType = &ValueType{ - Type: 0, -} - -} - diff --git a/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/constants.go b/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/constants.go deleted file mode 100644 index f257591e..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/constants.go +++ /dev/null @@ -1,26 +0,0 @@ -// Autogenerated by Thrift Compiler (facebook) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -// @generated - -package graph - -import ( - "bytes" - "sync" - "fmt" - thrift "github.com/facebook/fbthrift/thrift/lib/go/thrift" - nebula0 "github.com/vesoft-inc/nebula-go/nebula" - -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = sync.Mutex{} -var _ = bytes.Equal - -var _ = nebula0.GoUnusedProtection__ - -func init() { -} - diff --git a/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/graphservice.go b/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/graphservice.go deleted file mode 100644 index 44ba6798..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/graphservice.go +++ /dev/null @@ -1,1145 +0,0 @@ -// Autogenerated by Thrift Compiler (facebook) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -// @generated - -package graph - -import ( - "bytes" - "sync" - "fmt" - thrift "github.com/facebook/fbthrift/thrift/lib/go/thrift" - nebula0 "github.com/vesoft-inc/nebula-go/nebula" - -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = sync.Mutex{} -var _ = bytes.Equal - -var _ = nebula0.GoUnusedProtection__ -type GraphService interface { - // Parameters: - // - Username - // - Password - Authenticate(username string, password string) (r *AuthResponse, err error) - // Parameters: - // - SessionId - Signout(sessionId int64) (err error) - // Parameters: - // - SessionId - // - Stmt - Execute(sessionId int64, stmt string) (r *ExecutionResponse, err error) -} - -type GraphServiceClient struct { - Transport thrift.Transport - ProtocolFactory thrift.ProtocolFactory - InputProtocol thrift.Protocol - OutputProtocol thrift.Protocol - SeqId int32 -} - -func (client *GraphServiceClient) Close() error { - return client.Transport.Close() -} - -func NewGraphServiceClientFactory(t thrift.Transport, f thrift.ProtocolFactory) *GraphServiceClient { - return &GraphServiceClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewGraphServiceClient(t thrift.Transport, iprot thrift.Protocol, oprot thrift.Protocol) *GraphServiceClient { - return &GraphServiceClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Username -// - Password -func (p *GraphServiceClient) Authenticate(username string, password string) (r *AuthResponse, err error) { - if err = p.sendAuthenticate(username, password); err != nil { return } - return p.recvAuthenticate() -} - -func (p *GraphServiceClient) sendAuthenticate(username string, password string)(err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("authenticate", thrift.CALL, p.SeqId); err != nil { - return - } - args := GraphServiceAuthenticateArgs{ - Username : username, - Password : password, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - - -func (p *GraphServiceClient) recvAuthenticate() (value *AuthResponse, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "authenticate" { - err = thrift.NewApplicationException(thrift.WRONG_METHOD_NAME, "authenticate failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewApplicationException(thrift.BAD_SEQUENCE_ID, "authenticate failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error5 := thrift.NewApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error6 error - error6, err = error5.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error6 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "authenticate failed: invalid message type") - return - } - result := GraphServiceAuthenticateResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -// Parameters: -// - SessionId -func (p *GraphServiceClient) Signout(sessionId int64) (err error) { - if err = p.sendSignout(sessionId); err != nil { 
return } - return -} - -func (p *GraphServiceClient) sendSignout(sessionId int64)(err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("signout", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := GraphServiceSignoutArgs{ - SessionId : sessionId, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -// Parameters: -// - SessionId -// - Stmt -func (p *GraphServiceClient) Execute(sessionId int64, stmt string) (r *ExecutionResponse, err error) { - if err = p.sendExecute(sessionId, stmt); err != nil { return } - return p.recvExecute() -} - -func (p *GraphServiceClient) sendExecute(sessionId int64, stmt string)(err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("execute", thrift.CALL, p.SeqId); err != nil { - return - } - args := GraphServiceExecuteArgs{ - SessionId : sessionId, - Stmt : stmt, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - - -func (p *GraphServiceClient) recvExecute() (value *ExecutionResponse, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "execute" { - err = thrift.NewApplicationException(thrift.WRONG_METHOD_NAME, "execute failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewApplicationException(thrift.BAD_SEQUENCE_ID, "execute failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error7 := thrift.NewApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error8 error - error8, err = error7.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error8 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "execute failed: invalid message type") - return - } - result := GraphServiceExecuteResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - - -type GraphServiceThreadsafeClient struct { - Transport thrift.Transport - ProtocolFactory thrift.ProtocolFactory - InputProtocol thrift.Protocol - OutputProtocol thrift.Protocol - SeqId int32 - Mu sync.Mutex -} - -func NewGraphServiceThreadsafeClientFactory(t thrift.Transport, f thrift.ProtocolFactory) *GraphServiceThreadsafeClient { - return &GraphServiceThreadsafeClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewGraphServiceThreadsafeClient(t thrift.Transport, iprot thrift.Protocol, oprot thrift.Protocol) *GraphServiceThreadsafeClient { - return &GraphServiceThreadsafeClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -func (p *GraphServiceThreadsafeClient) Threadsafe() {} - -// Parameters: -// - Username -// - Password -func (p *GraphServiceThreadsafeClient) 
Authenticate(username string, password string) (r *AuthResponse, err error) { - p.Mu.Lock() - defer p.Mu.Unlock() - if err = p.sendAuthenticate(username, password); err != nil { return } - return p.recvAuthenticate() -} - -func (p *GraphServiceThreadsafeClient) sendAuthenticate(username string, password string)(err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("authenticate", thrift.CALL, p.SeqId); err != nil { - return - } - args := GraphServiceAuthenticateArgs{ - Username : username, - Password : password, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - - -func (p *GraphServiceThreadsafeClient) recvAuthenticate() (value *AuthResponse, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "authenticate" { - err = thrift.NewApplicationException(thrift.WRONG_METHOD_NAME, "authenticate failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewApplicationException(thrift.BAD_SEQUENCE_ID, "authenticate failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error9 := thrift.NewApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error10 error - error10, err = error9.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error10 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "authenticate failed: invalid message type") - return - } - result := GraphServiceAuthenticateResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -// Parameters: -// - SessionId -func (p *GraphServiceThreadsafeClient) Signout(sessionId int64) (err error) { - p.Mu.Lock() - defer p.Mu.Unlock() - if err = p.sendSignout(sessionId); err != nil { return } - return -} - -func (p *GraphServiceThreadsafeClient) sendSignout(sessionId int64)(err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("signout", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := GraphServiceSignoutArgs{ - SessionId : sessionId, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -// Parameters: -// - SessionId -// - Stmt -func (p *GraphServiceThreadsafeClient) Execute(sessionId int64, stmt string) (r *ExecutionResponse, err error) { - p.Mu.Lock() - defer p.Mu.Unlock() - if err = p.sendExecute(sessionId, stmt); err != nil { return } - return p.recvExecute() -} - -func (p *GraphServiceThreadsafeClient) sendExecute(sessionId int64, stmt string)(err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("execute", thrift.CALL, p.SeqId); err != nil { - return - } - args := GraphServiceExecuteArgs{ - SessionId : sessionId, - Stmt : 
stmt, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - - -func (p *GraphServiceThreadsafeClient) recvExecute() (value *ExecutionResponse, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "execute" { - err = thrift.NewApplicationException(thrift.WRONG_METHOD_NAME, "execute failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewApplicationException(thrift.BAD_SEQUENCE_ID, "execute failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error11 := thrift.NewApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error12 error - error12, err = error11.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error12 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "execute failed: invalid message type") - return - } - result := GraphServiceExecuteResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - - -type GraphServiceProcessor struct { - processorMap map[string]thrift.ProcessorFunction - handler GraphService -} - -func (p *GraphServiceProcessor) AddToProcessorMap(key string, processor thrift.ProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *GraphServiceProcessor) GetProcessorFunction(key string) (processor thrift.ProcessorFunction, err error) { - if processor, ok := p.processorMap[key]; ok { - return processor, nil - } - return nil, nil // generic error message will be sent -} - -func (p *GraphServiceProcessor) ProcessorMap() map[string]thrift.ProcessorFunction { - return p.processorMap -} - -func NewGraphServiceProcessor(handler GraphService) *GraphServiceProcessor { - self13 := &GraphServiceProcessor{handler:handler, processorMap:make(map[string]thrift.ProcessorFunction)} - self13.processorMap["authenticate"] = &graphServiceProcessorAuthenticate{handler:handler} - self13.processorMap["signout"] = &graphServiceProcessorSignout{handler:handler} - self13.processorMap["execute"] = &graphServiceProcessorExecute{handler:handler} - return self13 -} - -type graphServiceProcessorAuthenticate struct { - handler GraphService -} - -func (p *graphServiceProcessorAuthenticate) Read(iprot thrift.Protocol) (thrift.Struct, thrift.Exception) { - args := GraphServiceAuthenticateArgs{} - if err := args.Read(iprot); err != nil { - return nil, err - } - iprot.ReadMessageEnd() - return &args, nil -} - -func (p *graphServiceProcessorAuthenticate) Write(seqId int32, result thrift.WritableStruct, oprot thrift.Protocol) (err thrift.Exception) { - var err2 error - messageType := thrift.REPLY - switch result.(type) { - case thrift.ApplicationException: - messageType = thrift.EXCEPTION - } - if err2 = oprot.WriteMessageBegin("authenticate", messageType, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - return err -} - -func (p *graphServiceProcessorAuthenticate) 
Run(argStruct thrift.Struct) (thrift.WritableStruct, thrift.ApplicationException) { - args := argStruct.(*GraphServiceAuthenticateArgs) - var result GraphServiceAuthenticateResult - if retval, err := p.handler.Authenticate(args.Username, args.Password); err != nil { - switch err.(type) { - default: - x := thrift.NewApplicationException(thrift.INTERNAL_ERROR, "Internal error processing authenticate: " + err.Error()) - return x, x - } - } else { - result.Success = retval - } - return &result, nil -} - -type graphServiceProcessorSignout struct { - handler GraphService -} - -func (p *graphServiceProcessorSignout) Read(iprot thrift.Protocol) (thrift.Struct, thrift.Exception) { - args := GraphServiceSignoutArgs{} - if err := args.Read(iprot); err != nil { - return nil, err - } - iprot.ReadMessageEnd() - return &args, nil -} - -func (p *graphServiceProcessorSignout) Write(seqId int32, result thrift.WritableStruct, oprot thrift.Protocol) (err thrift.Exception) { - var err2 error - messageType := thrift.REPLY - switch result.(type) { - case thrift.ApplicationException: - messageType = thrift.EXCEPTION - } - if err2 = oprot.WriteMessageBegin("signout", messageType, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - return err -} - -func (p *graphServiceProcessorSignout) Run(argStruct thrift.Struct) (thrift.WritableStruct, thrift.ApplicationException) { - args := argStruct.(*GraphServiceSignoutArgs) - if err := p.handler.Signout(args.SessionId); err != nil { - switch err.(type) { - default: - x := thrift.NewApplicationException(thrift.INTERNAL_ERROR, "Internal error processing signout: " + err.Error()) - return x, x - } - } - return nil, nil -} - -type graphServiceProcessorExecute struct { - handler GraphService -} - -func (p *graphServiceProcessorExecute) Read(iprot thrift.Protocol) (thrift.Struct, thrift.Exception) { - args := GraphServiceExecuteArgs{} - if err := args.Read(iprot); err != nil { - return nil, err - } - iprot.ReadMessageEnd() - return &args, nil -} - -func (p *graphServiceProcessorExecute) Write(seqId int32, result thrift.WritableStruct, oprot thrift.Protocol) (err thrift.Exception) { - var err2 error - messageType := thrift.REPLY - switch result.(type) { - case thrift.ApplicationException: - messageType = thrift.EXCEPTION - } - if err2 = oprot.WriteMessageBegin("execute", messageType, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - return err -} - -func (p *graphServiceProcessorExecute) Run(argStruct thrift.Struct) (thrift.WritableStruct, thrift.ApplicationException) { - args := argStruct.(*GraphServiceExecuteArgs) - var result GraphServiceExecuteResult - if retval, err := p.handler.Execute(args.SessionId, args.Stmt); err != nil { - switch err.(type) { - default: - x := thrift.NewApplicationException(thrift.INTERNAL_ERROR, "Internal error processing execute: " + err.Error()) - return x, x - } - } else { - result.Success = retval - } - return &result, nil -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Username -// - Password -type GraphServiceAuthenticateArgs struct { - Username string `thrift:"username,1" db:"username" 
json:"username"` - Password string `thrift:"password,2" db:"password" json:"password"` -} - -func NewGraphServiceAuthenticateArgs() *GraphServiceAuthenticateArgs { - return &GraphServiceAuthenticateArgs{} -} - - -func (p *GraphServiceAuthenticateArgs) GetUsername() string { - return p.Username -} - -func (p *GraphServiceAuthenticateArgs) GetPassword() string { - return p.Password -} -func (p *GraphServiceAuthenticateArgs) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *GraphServiceAuthenticateArgs) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Username = v -} - return nil -} - -func (p *GraphServiceAuthenticateArgs) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Password = v -} - return nil -} - -func (p *GraphServiceAuthenticateArgs) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("authenticate_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *GraphServiceAuthenticateArgs) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("username", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:username: ", p), err) } - if err := oprot.WriteString(string(p.Username)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.username (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:username: ", p), err) } - return err -} - -func (p *GraphServiceAuthenticateArgs) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("password", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:password: ", p), err) } - if err := oprot.WriteString(string(p.Password)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.password (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:password: ", p), err) } - return err -} - -func (p *GraphServiceAuthenticateArgs) String() 
string { - if p == nil { - return "" - } - return fmt.Sprintf("GraphServiceAuthenticateArgs(%+v)", *p) -} - -// Attributes: -// - Success -type GraphServiceAuthenticateResult struct { - Success *AuthResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewGraphServiceAuthenticateResult() *GraphServiceAuthenticateResult { - return &GraphServiceAuthenticateResult{} -} - -var GraphServiceAuthenticateResult_Success_DEFAULT *AuthResponse -func (p *GraphServiceAuthenticateResult) GetSuccess() *AuthResponse { - if !p.IsSetSuccess() { - return GraphServiceAuthenticateResult_Success_DEFAULT - } -return p.Success -} -func (p *GraphServiceAuthenticateResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *GraphServiceAuthenticateResult) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if err := p.ReadField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *GraphServiceAuthenticateResult) ReadField0(iprot thrift.Protocol) error { - p.Success = NewAuthResponse() - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *GraphServiceAuthenticateResult) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("authenticate_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField0(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *GraphServiceAuthenticateResult) writeField0(oprot thrift.Protocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *GraphServiceAuthenticateResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("GraphServiceAuthenticateResult(%+v)", *p) -} - -// Attributes: -// - SessionId -type GraphServiceSignoutArgs struct { - SessionId int64 `thrift:"sessionId,1" db:"sessionId" json:"sessionId"` -} - -func NewGraphServiceSignoutArgs() *GraphServiceSignoutArgs { - return &GraphServiceSignoutArgs{} -} - - -func (p *GraphServiceSignoutArgs) GetSessionId() int64 { - return p.SessionId -} -func (p *GraphServiceSignoutArgs) Read(iprot thrift.Protocol) error { - if _, err := 
iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *GraphServiceSignoutArgs) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.SessionId = v -} - return nil -} - -func (p *GraphServiceSignoutArgs) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("signout_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *GraphServiceSignoutArgs) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("sessionId", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionId: ", p), err) } - if err := oprot.WriteI64(int64(p.SessionId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.sessionId (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionId: ", p), err) } - return err -} - -func (p *GraphServiceSignoutArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("GraphServiceSignoutArgs(%+v)", *p) -} - -// Attributes: -// - SessionId -// - Stmt -type GraphServiceExecuteArgs struct { - SessionId int64 `thrift:"sessionId,1" db:"sessionId" json:"sessionId"` - Stmt string `thrift:"stmt,2" db:"stmt" json:"stmt"` -} - -func NewGraphServiceExecuteArgs() *GraphServiceExecuteArgs { - return &GraphServiceExecuteArgs{} -} - - -func (p *GraphServiceExecuteArgs) GetSessionId() int64 { - return p.SessionId -} - -func (p *GraphServiceExecuteArgs) GetStmt() string { - return p.Stmt -} -func (p *GraphServiceExecuteArgs) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil 
-} - -func (p *GraphServiceExecuteArgs) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.SessionId = v -} - return nil -} - -func (p *GraphServiceExecuteArgs) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Stmt = v -} - return nil -} - -func (p *GraphServiceExecuteArgs) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("execute_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *GraphServiceExecuteArgs) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("sessionId", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionId: ", p), err) } - if err := oprot.WriteI64(int64(p.SessionId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.sessionId (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionId: ", p), err) } - return err -} - -func (p *GraphServiceExecuteArgs) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("stmt", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stmt: ", p), err) } - if err := oprot.WriteString(string(p.Stmt)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.stmt (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stmt: ", p), err) } - return err -} - -func (p *GraphServiceExecuteArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("GraphServiceExecuteArgs(%+v)", *p) -} - -// Attributes: -// - Success -type GraphServiceExecuteResult struct { - Success *ExecutionResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewGraphServiceExecuteResult() *GraphServiceExecuteResult { - return &GraphServiceExecuteResult{} -} - -var GraphServiceExecuteResult_Success_DEFAULT *ExecutionResponse -func (p *GraphServiceExecuteResult) GetSuccess() *ExecutionResponse { - if !p.IsSetSuccess() { - return GraphServiceExecuteResult_Success_DEFAULT - } -return p.Success -} -func (p *GraphServiceExecuteResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *GraphServiceExecuteResult) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if err := p.ReadField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err 
!= nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *GraphServiceExecuteResult) ReadField0(iprot thrift.Protocol) error { - p.Success = NewExecutionResponse() - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *GraphServiceExecuteResult) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("execute_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField0(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *GraphServiceExecuteResult) writeField0(oprot thrift.Protocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *GraphServiceExecuteResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("GraphServiceExecuteResult(%+v)", *p) -} - - diff --git a/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/ttypes.go b/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/ttypes.go deleted file mode 100644 index fdca0619..00000000 --- a/vendor/github.com/vesoft-inc/nebula-go/nebula/graph/ttypes.go +++ /dev/null @@ -1,2373 +0,0 @@ -// Autogenerated by Thrift Compiler (facebook) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -// @generated - -package graph - -import ( - "bytes" - "sync" - "fmt" - thrift "github.com/facebook/fbthrift/thrift/lib/go/thrift" - nebula0 "github.com/vesoft-inc/nebula-go/nebula" - -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = sync.Mutex{} -var _ = bytes.Equal - -var _ = nebula0.GoUnusedProtection__ -var GoUnusedProtection__ int; - -type ErrorCode int64 -const ( - ErrorCode_SUCCEEDED ErrorCode = 0 - ErrorCode_E_DISCONNECTED ErrorCode = -1 - ErrorCode_E_FAIL_TO_CONNECT ErrorCode = -2 - ErrorCode_E_RPC_FAILURE ErrorCode = -3 - ErrorCode_E_BAD_USERNAME_PASSWORD ErrorCode = -4 - ErrorCode_E_SESSION_INVALID ErrorCode = -5 - ErrorCode_E_SESSION_TIMEOUT ErrorCode = -6 - ErrorCode_E_SYNTAX_ERROR ErrorCode = -7 - ErrorCode_E_EXECUTION_ERROR ErrorCode = -8 - ErrorCode_E_STATEMENT_EMTPY ErrorCode = -9 - ErrorCode_E_USER_NOT_FOUND ErrorCode = -10 - ErrorCode_E_BAD_PERMISSION ErrorCode = -11 -) - -var ErrorCodeToName = map[ErrorCode]string { - ErrorCode_SUCCEEDED: "SUCCEEDED", - ErrorCode_E_DISCONNECTED: "E_DISCONNECTED", - ErrorCode_E_FAIL_TO_CONNECT: "E_FAIL_TO_CONNECT", - ErrorCode_E_RPC_FAILURE: "E_RPC_FAILURE", - ErrorCode_E_BAD_USERNAME_PASSWORD: "E_BAD_USERNAME_PASSWORD", - ErrorCode_E_SESSION_INVALID: "E_SESSION_INVALID", - ErrorCode_E_SESSION_TIMEOUT: "E_SESSION_TIMEOUT", - ErrorCode_E_SYNTAX_ERROR: "E_SYNTAX_ERROR", - ErrorCode_E_EXECUTION_ERROR: "E_EXECUTION_ERROR", - ErrorCode_E_STATEMENT_EMTPY: "E_STATEMENT_EMTPY", - ErrorCode_E_USER_NOT_FOUND: "E_USER_NOT_FOUND", - ErrorCode_E_BAD_PERMISSION: "E_BAD_PERMISSION", -} - -var ErrorCodeToValue = map[string]ErrorCode { - "SUCCEEDED": ErrorCode_SUCCEEDED, - "E_DISCONNECTED": ErrorCode_E_DISCONNECTED, - "E_FAIL_TO_CONNECT": ErrorCode_E_FAIL_TO_CONNECT, - "E_RPC_FAILURE": ErrorCode_E_RPC_FAILURE, - "E_BAD_USERNAME_PASSWORD": ErrorCode_E_BAD_USERNAME_PASSWORD, - "E_SESSION_INVALID": ErrorCode_E_SESSION_INVALID, - "E_SESSION_TIMEOUT": ErrorCode_E_SESSION_TIMEOUT, - "E_SYNTAX_ERROR": ErrorCode_E_SYNTAX_ERROR, - "E_EXECUTION_ERROR": ErrorCode_E_EXECUTION_ERROR, - "E_STATEMENT_EMTPY": ErrorCode_E_STATEMENT_EMTPY, - "E_USER_NOT_FOUND": ErrorCode_E_USER_NOT_FOUND, - "E_BAD_PERMISSION": ErrorCode_E_BAD_PERMISSION, -} - -func (p ErrorCode) String() string { - if v, ok := ErrorCodeToName[p]; ok { - return v - } - return "" -} - -func ErrorCodeFromString(s string) (ErrorCode, error) { - if v, ok := ErrorCodeToValue[s]; ok { - return v, nil - } - return ErrorCode(0), fmt.Errorf("not a valid ErrorCode string") -} - -func ErrorCodePtr(v ErrorCode) *ErrorCode { return &v } - -type IdType int64 - -func IdTypePtr(v IdType) *IdType { return &v } - -type Timestamp int64 - -func TimestampPtr(v Timestamp) *Timestamp { return &v } - -type Year int16 - -func YearPtr(v Year) *Year { return &v } - -// Attributes: -// - Year -// - Month -type YearMonth struct { - Year int16 `thrift:"year,1" db:"year" json:"year"` - Month int8 `thrift:"month,2" db:"month" json:"month"` -} - -func NewYearMonth() *YearMonth { - return &YearMonth{} -} - - -func (p *YearMonth) GetYear() int16 { - return p.Year -} - -func (p *YearMonth) GetMonth() int8 { - return p.Month -} -func (p *YearMonth) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := 
iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *YearMonth) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Year = v -} - return nil -} - -func (p *YearMonth) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := int8(v) - p.Month = temp -} - return nil -} - -func (p *YearMonth) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("YearMonth"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *YearMonth) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("year", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:year: ", p), err) } - if err := oprot.WriteI16(int16(p.Year)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.year (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:year: ", p), err) } - return err -} - -func (p *YearMonth) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("month", thrift.BYTE, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:month: ", p), err) } - if err := oprot.WriteByte(byte(p.Month)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.month (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:month: ", p), err) } - return err -} - -func (p *YearMonth) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("YearMonth(%+v)", *p) -} - -// Attributes: -// - Year -// - Month -// - Day -type Date struct { - Year int16 `thrift:"year,1" db:"year" json:"year"` - Month int8 `thrift:"month,2" db:"month" json:"month"` - Day int8 `thrift:"day,3" db:"day" json:"day"` -} - -func NewDate() *Date { - return &Date{} -} - - -func (p *Date) GetYear() int16 { - return p.Year -} - -func (p *Date) GetMonth() int8 { - return p.Month -} - -func (p *Date) GetDay() int8 { - return p.Day -} -func (p *Date) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err 
- } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Date) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Year = v -} - return nil -} - -func (p *Date) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := int8(v) - p.Month = temp -} - return nil -} - -func (p *Date) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := int8(v) - p.Day = temp -} - return nil -} - -func (p *Date) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("Date"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Date) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("year", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:year: ", p), err) } - if err := oprot.WriteI16(int16(p.Year)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.year (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:year: ", p), err) } - return err -} - -func (p *Date) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("month", thrift.BYTE, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:month: ", p), err) } - if err := oprot.WriteByte(byte(p.Month)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.month (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:month: ", p), err) } - return err -} - -func (p *Date) writeField3(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("day", thrift.BYTE, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:day: ", p), err) } - if err := oprot.WriteByte(byte(p.Day)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.day (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:day: ", p), err) } - return err -} - -func (p *Date) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Date(%+v)", *p) -} - -// Attributes: -// - Year -// - Month -// - Day -// - Hour -// - Minute -// - Second -// - Millisec -// - Microsec -type DateTime struct { - Year int16 `thrift:"year,1" db:"year" json:"year"` - Month int8 `thrift:"month,2" db:"month" json:"month"` - Day int8 `thrift:"day,3" 
db:"day" json:"day"` - Hour int8 `thrift:"hour,4" db:"hour" json:"hour"` - Minute int8 `thrift:"minute,5" db:"minute" json:"minute"` - Second int8 `thrift:"second,6" db:"second" json:"second"` - Millisec int16 `thrift:"millisec,7" db:"millisec" json:"millisec"` - Microsec int16 `thrift:"microsec,8" db:"microsec" json:"microsec"` -} - -func NewDateTime() *DateTime { - return &DateTime{} -} - - -func (p *DateTime) GetYear() int16 { - return p.Year -} - -func (p *DateTime) GetMonth() int8 { - return p.Month -} - -func (p *DateTime) GetDay() int8 { - return p.Day -} - -func (p *DateTime) GetHour() int8 { - return p.Hour -} - -func (p *DateTime) GetMinute() int8 { - return p.Minute -} - -func (p *DateTime) GetSecond() int8 { - return p.Second -} - -func (p *DateTime) GetMillisec() int16 { - return p.Millisec -} - -func (p *DateTime) GetMicrosec() int16 { - return p.Microsec -} -func (p *DateTime) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - case 4: - if err := p.ReadField4(iprot); err != nil { - return err - } - case 5: - if err := p.ReadField5(iprot); err != nil { - return err - } - case 6: - if err := p.ReadField6(iprot); err != nil { - return err - } - case 7: - if err := p.ReadField7(iprot); err != nil { - return err - } - case 8: - if err := p.ReadField8(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DateTime) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Year = v -} - return nil -} - -func (p *DateTime) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := int8(v) - p.Month = temp -} - return nil -} - -func (p *DateTime) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := int8(v) - p.Day = temp -} - return nil -} - -func (p *DateTime) ReadField4(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - temp := int8(v) - p.Hour = temp -} - return nil -} - -func (p *DateTime) ReadField5(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - temp := int8(v) - p.Minute = temp -} - return nil -} - -func (p *DateTime) ReadField6(iprot thrift.Protocol) error { - if v, err := iprot.ReadByte(); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - temp := int8(v) - p.Second = 
temp -} - return nil -} - -func (p *DateTime) ReadField7(iprot thrift.Protocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.Millisec = v -} - return nil -} - -func (p *DateTime) ReadField8(iprot thrift.Protocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 8: ", err) -} else { - p.Microsec = v -} - return nil -} - -func (p *DateTime) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("DateTime"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { return err } - if err := p.writeField6(oprot); err != nil { return err } - if err := p.writeField7(oprot); err != nil { return err } - if err := p.writeField8(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *DateTime) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("year", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:year: ", p), err) } - if err := oprot.WriteI16(int16(p.Year)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.year (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:year: ", p), err) } - return err -} - -func (p *DateTime) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("month", thrift.BYTE, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:month: ", p), err) } - if err := oprot.WriteByte(byte(p.Month)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.month (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:month: ", p), err) } - return err -} - -func (p *DateTime) writeField3(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("day", thrift.BYTE, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:day: ", p), err) } - if err := oprot.WriteByte(byte(p.Day)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.day (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:day: ", p), err) } - return err -} - -func (p *DateTime) writeField4(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("hour", thrift.BYTE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hour: ", p), err) } - if err := oprot.WriteByte(byte(p.Hour)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.hour (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hour: ", p), err) } - return err -} - -func (p *DateTime) writeField5(oprot 
thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("minute", thrift.BYTE, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:minute: ", p), err) } - if err := oprot.WriteByte(byte(p.Minute)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.minute (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:minute: ", p), err) } - return err -} - -func (p *DateTime) writeField6(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("second", thrift.BYTE, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:second: ", p), err) } - if err := oprot.WriteByte(byte(p.Second)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.second (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:second: ", p), err) } - return err -} - -func (p *DateTime) writeField7(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("millisec", thrift.I16, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:millisec: ", p), err) } - if err := oprot.WriteI16(int16(p.Millisec)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.millisec (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:millisec: ", p), err) } - return err -} - -func (p *DateTime) writeField8(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("microsec", thrift.I16, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:microsec: ", p), err) } - if err := oprot.WriteI16(int16(p.Microsec)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.microsec (8) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:microsec: ", p), err) } - return err -} - -func (p *DateTime) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DateTime(%+v)", *p) -} - -// Attributes: -// - Id -type Vertex struct { - Id nebula0.VertexID `thrift:"id,1" db:"id" json:"id"` -} - -func NewVertex() *Vertex { - return &Vertex{} -} - - -func (p *Vertex) GetId() nebula0.VertexID { - return p.Id -} -func (p *Vertex) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Vertex) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := nebula0.VertexID(v) - p.Id = temp -} - return nil -} - -func (p *Vertex) Write(oprot 
thrift.Protocol) error { - if err := oprot.WriteStructBegin("Vertex"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Vertex) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:id: ", p), err) } - if err := oprot.WriteI64(int64(p.Id)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:id: ", p), err) } - return err -} - -func (p *Vertex) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Vertex(%+v)", *p) -} - -// Attributes: -// - Type -// - Ranking -// - Src -// - Dst -type Edge struct { - Type []byte `thrift:"type,1" db:"type" json:"type"` - Ranking nebula0.EdgeRanking `thrift:"ranking,2" db:"ranking" json:"ranking"` - Src *nebula0.VertexID `thrift:"src,3" db:"src" json:"src,omitempty"` - Dst *nebula0.VertexID `thrift:"dst,4" db:"dst" json:"dst,omitempty"` -} - -func NewEdge() *Edge { - return &Edge{} -} - - -func (p *Edge) GetType() []byte { - return p.Type -} - -func (p *Edge) GetRanking() nebula0.EdgeRanking { - return p.Ranking -} -var Edge_Src_DEFAULT nebula0.VertexID -func (p *Edge) GetSrc() nebula0.VertexID { - if !p.IsSetSrc() { - return Edge_Src_DEFAULT - } -return *p.Src -} -var Edge_Dst_DEFAULT nebula0.VertexID -func (p *Edge) GetDst() nebula0.VertexID { - if !p.IsSetDst() { - return Edge_Dst_DEFAULT - } -return *p.Dst -} -func (p *Edge) IsSetSrc() bool { - return p.Src != nil -} - -func (p *Edge) IsSetDst() bool { - return p.Dst != nil -} - -func (p *Edge) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - case 4: - if err := p.ReadField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Edge) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Type = v -} - return nil -} - -func (p *Edge) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := nebula0.EdgeRanking(v) - p.Ranking = temp -} - return nil -} - -func (p *Edge) 
ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := nebula0.VertexID(v) - p.Src = &temp -} - return nil -} - -func (p *Edge) ReadField4(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - temp := nebula0.VertexID(v) - p.Dst = &temp -} - return nil -} - -func (p *Edge) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("Edge"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Edge) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("type", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) } - if err := oprot.WriteBinary(p.Type); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) } - return err -} - -func (p *Edge) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("ranking", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ranking: ", p), err) } - if err := oprot.WriteI64(int64(p.Ranking)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ranking (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ranking: ", p), err) } - return err -} - -func (p *Edge) writeField3(oprot thrift.Protocol) (err error) { - if p.IsSetSrc() { - if err := oprot.WriteFieldBegin("src", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:src: ", p), err) } - if err := oprot.WriteI64(int64(*p.Src)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.src (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:src: ", p), err) } - } - return err -} - -func (p *Edge) writeField4(oprot thrift.Protocol) (err error) { - if p.IsSetDst() { - if err := oprot.WriteFieldBegin("dst", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:dst: ", p), err) } - if err := oprot.WriteI64(int64(*p.Dst)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.dst (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:dst: ", p), err) } - } - return err -} - -func (p *Edge) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Edge(%+v)", *p) -} - -// Attributes: -// - Vertex -// - Edge -type PathEntry struct { - Vertex *Vertex `thrift:"vertex,1" db:"vertex" json:"vertex,omitempty"` - Edge 
*Edge `thrift:"edge,2" db:"edge" json:"edge,omitempty"` -} - -func NewPathEntry() *PathEntry { - return &PathEntry{} -} - -var PathEntry_Vertex_DEFAULT *Vertex -func (p *PathEntry) GetVertex() *Vertex { - if !p.IsSetVertex() { - return PathEntry_Vertex_DEFAULT - } -return p.Vertex -} -var PathEntry_Edge_DEFAULT *Edge -func (p *PathEntry) GetEdge() *Edge { - if !p.IsSetEdge() { - return PathEntry_Edge_DEFAULT - } -return p.Edge -} -func (p *PathEntry) CountSetFieldsPathEntry() int { - count := 0 - if (p.IsSetVertex()) { - count++ - } - if (p.IsSetEdge()) { - count++ - } - return count - -} - -func (p *PathEntry) IsSetVertex() bool { - return p.Vertex != nil -} - -func (p *PathEntry) IsSetEdge() bool { - return p.Edge != nil -} - -func (p *PathEntry) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *PathEntry) ReadField1(iprot thrift.Protocol) error { - p.Vertex = NewVertex() - if err := p.Vertex.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Vertex), err) - } - return nil -} - -func (p *PathEntry) ReadField2(iprot thrift.Protocol) error { - p.Edge = NewEdge() - if err := p.Edge.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Edge), err) - } - return nil -} - -func (p *PathEntry) Write(oprot thrift.Protocol) error { - if c := p.CountSetFieldsPathEntry(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) - } - if err := oprot.WriteStructBegin("PathEntry"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *PathEntry) writeField1(oprot thrift.Protocol) (err error) { - if p.IsSetVertex() { - if err := oprot.WriteFieldBegin("vertex", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:vertex: ", p), err) } - if err := p.Vertex.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Vertex), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:vertex: ", p), err) } - } - return err -} - -func (p *PathEntry) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetEdge() { - if err := oprot.WriteFieldBegin("edge", thrift.STRUCT, 2); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:edge: ", p), err) } - if err := p.Edge.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Edge), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:edge: ", p), err) } - } - return err -} - -func (p *PathEntry) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PathEntry(%+v)", *p) -} - -// Attributes: -// - EntryList -type Path struct { - EntryList []*PathEntry `thrift:"entry_list,1" db:"entry_list" json:"entry_list"` -} - -func NewPath() *Path { - return &Path{} -} - - -func (p *Path) GetEntryList() []*PathEntry { - return p.EntryList -} -func (p *Path) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Path) ReadField1(iprot thrift.Protocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*PathEntry, 0, size) - p.EntryList = tSlice - for i := 0; i < size; i ++ { - _elem1 := NewPathEntry() - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.EntryList = append(p.EntryList, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Path) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("Path"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Path) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("entry_list", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:entry_list: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.EntryList)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.EntryList { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:entry_list: ", p), err) } - return err -} - -func (p *Path) String() string { - if p == nil { - return "" - } - 
return fmt.Sprintf("Path(%+v)", *p) -} - -// Attributes: -// - BoolVal -// - Integer -// - Id -// - SinglePrecision -// - DoublePrecision -// - Str -// - Timestamp -// - Year -// - Month -// - Date -// - Datetime -// - Path -type ColumnValue struct { - BoolVal *bool `thrift:"bool_val,1" db:"bool_val" json:"bool_val,omitempty"` - Integer *int64 `thrift:"integer,2" db:"integer" json:"integer,omitempty"` - Id *IdType `thrift:"id,3" db:"id" json:"id,omitempty"` - SinglePrecision *float32 `thrift:"single_precision,4" db:"single_precision" json:"single_precision,omitempty"` - DoublePrecision *float64 `thrift:"double_precision,5" db:"double_precision" json:"double_precision,omitempty"` - Str []byte `thrift:"str,6" db:"str" json:"str,omitempty"` - Timestamp *Timestamp `thrift:"timestamp,7" db:"timestamp" json:"timestamp,omitempty"` - Year *Year `thrift:"year,8" db:"year" json:"year,omitempty"` - Month *YearMonth `thrift:"month,9" db:"month" json:"month,omitempty"` - Date *Date `thrift:"date,10" db:"date" json:"date,omitempty"` - Datetime *DateTime `thrift:"datetime,11" db:"datetime" json:"datetime,omitempty"` - // unused fields # 12 to 40 - Path *Path `thrift:"path,41" db:"path" json:"path,omitempty"` -} - -func NewColumnValue() *ColumnValue { - return &ColumnValue{} -} - -var ColumnValue_BoolVal_DEFAULT bool -func (p *ColumnValue) GetBoolVal() bool { - if !p.IsSetBoolVal() { - return ColumnValue_BoolVal_DEFAULT - } -return *p.BoolVal -} -var ColumnValue_Integer_DEFAULT int64 -func (p *ColumnValue) GetInteger() int64 { - if !p.IsSetInteger() { - return ColumnValue_Integer_DEFAULT - } -return *p.Integer -} -var ColumnValue_Id_DEFAULT IdType -func (p *ColumnValue) GetId() IdType { - if !p.IsSetId() { - return ColumnValue_Id_DEFAULT - } -return *p.Id -} -var ColumnValue_SinglePrecision_DEFAULT float32 -func (p *ColumnValue) GetSinglePrecision() float32 { - if !p.IsSetSinglePrecision() { - return ColumnValue_SinglePrecision_DEFAULT - } -return *p.SinglePrecision -} -var ColumnValue_DoublePrecision_DEFAULT float64 -func (p *ColumnValue) GetDoublePrecision() float64 { - if !p.IsSetDoublePrecision() { - return ColumnValue_DoublePrecision_DEFAULT - } -return *p.DoublePrecision -} -var ColumnValue_Str_DEFAULT []byte - -func (p *ColumnValue) GetStr() []byte { - return p.Str -} -var ColumnValue_Timestamp_DEFAULT Timestamp -func (p *ColumnValue) GetTimestamp() Timestamp { - if !p.IsSetTimestamp() { - return ColumnValue_Timestamp_DEFAULT - } -return *p.Timestamp -} -var ColumnValue_Year_DEFAULT Year -func (p *ColumnValue) GetYear() Year { - if !p.IsSetYear() { - return ColumnValue_Year_DEFAULT - } -return *p.Year -} -var ColumnValue_Month_DEFAULT *YearMonth -func (p *ColumnValue) GetMonth() *YearMonth { - if !p.IsSetMonth() { - return ColumnValue_Month_DEFAULT - } -return p.Month -} -var ColumnValue_Date_DEFAULT *Date -func (p *ColumnValue) GetDate() *Date { - if !p.IsSetDate() { - return ColumnValue_Date_DEFAULT - } -return p.Date -} -var ColumnValue_Datetime_DEFAULT *DateTime -func (p *ColumnValue) GetDatetime() *DateTime { - if !p.IsSetDatetime() { - return ColumnValue_Datetime_DEFAULT - } -return p.Datetime -} -var ColumnValue_Path_DEFAULT *Path -func (p *ColumnValue) GetPath() *Path { - if !p.IsSetPath() { - return ColumnValue_Path_DEFAULT - } -return p.Path -} -func (p *ColumnValue) CountSetFieldsColumnValue() int { - count := 0 - if (p.IsSetBoolVal()) { - count++ - } - if (p.IsSetInteger()) { - count++ - } - if (p.IsSetId()) { - count++ - } - if (p.IsSetSinglePrecision()) { - count++ - } - if 
(p.IsSetDoublePrecision()) { - count++ - } - if (p.IsSetTimestamp()) { - count++ - } - if (p.IsSetYear()) { - count++ - } - if (p.IsSetMonth()) { - count++ - } - if (p.IsSetDate()) { - count++ - } - if (p.IsSetDatetime()) { - count++ - } - if (p.IsSetPath()) { - count++ - } - return count - -} - -func (p *ColumnValue) IsSetBoolVal() bool { - return p.BoolVal != nil -} - -func (p *ColumnValue) IsSetInteger() bool { - return p.Integer != nil -} - -func (p *ColumnValue) IsSetId() bool { - return p.Id != nil -} - -func (p *ColumnValue) IsSetSinglePrecision() bool { - return p.SinglePrecision != nil -} - -func (p *ColumnValue) IsSetDoublePrecision() bool { - return p.DoublePrecision != nil -} - -func (p *ColumnValue) IsSetStr() bool { - return p.Str != nil -} - -func (p *ColumnValue) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *ColumnValue) IsSetYear() bool { - return p.Year != nil -} - -func (p *ColumnValue) IsSetMonth() bool { - return p.Month != nil -} - -func (p *ColumnValue) IsSetDate() bool { - return p.Date != nil -} - -func (p *ColumnValue) IsSetDatetime() bool { - return p.Datetime != nil -} - -func (p *ColumnValue) IsSetPath() bool { - return p.Path != nil -} - -func (p *ColumnValue) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - case 4: - if err := p.ReadField4(iprot); err != nil { - return err - } - case 5: - if err := p.ReadField5(iprot); err != nil { - return err - } - case 6: - if err := p.ReadField6(iprot); err != nil { - return err - } - case 7: - if err := p.ReadField7(iprot); err != nil { - return err - } - case 8: - if err := p.ReadField8(iprot); err != nil { - return err - } - case 9: - if err := p.ReadField9(iprot); err != nil { - return err - } - case 10: - if err := p.ReadField10(iprot); err != nil { - return err - } - case 11: - if err := p.ReadField11(iprot); err != nil { - return err - } - case 41: - if err := p.ReadField41(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ColumnValue) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.BoolVal = &v -} - return nil -} - -func (p *ColumnValue) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Integer = &v -} - return nil -} - -func (p *ColumnValue) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := IdType(v) - p.Id = &temp -} - return nil -} - -func (p *ColumnValue) ReadField4(iprot thrift.Protocol) error { 
- if v, err := iprot.ReadFloat(); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.SinglePrecision = &v -} - return nil -} - -func (p *ColumnValue) ReadField5(iprot thrift.Protocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.DoublePrecision = &v -} - return nil -} - -func (p *ColumnValue) ReadField6(iprot thrift.Protocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.Str = v -} - return nil -} - -func (p *ColumnValue) ReadField7(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - temp := Timestamp(v) - p.Timestamp = &temp -} - return nil -} - -func (p *ColumnValue) ReadField8(iprot thrift.Protocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 8: ", err) -} else { - temp := Year(v) - p.Year = &temp -} - return nil -} - -func (p *ColumnValue) ReadField9(iprot thrift.Protocol) error { - p.Month = NewYearMonth() - if err := p.Month.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Month), err) - } - return nil -} - -func (p *ColumnValue) ReadField10(iprot thrift.Protocol) error { - p.Date = NewDate() - if err := p.Date.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Date), err) - } - return nil -} - -func (p *ColumnValue) ReadField11(iprot thrift.Protocol) error { - p.Datetime = NewDateTime() - if err := p.Datetime.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Datetime), err) - } - return nil -} - -func (p *ColumnValue) ReadField41(iprot thrift.Protocol) error { - p.Path = NewPath() - if err := p.Path.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Path), err) - } - return nil -} - -func (p *ColumnValue) Write(oprot thrift.Protocol) error { - if c := p.CountSetFieldsColumnValue(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) - } - if err := oprot.WriteStructBegin("ColumnValue"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { return err } - if err := p.writeField6(oprot); err != nil { return err } - if err := p.writeField7(oprot); err != nil { return err } - if err := p.writeField8(oprot); err != nil { return err } - if err := p.writeField9(oprot); err != nil { return err } - if err := p.writeField10(oprot); err != nil { return err } - if err := p.writeField11(oprot); err != nil { return err } - if err := p.writeField41(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ColumnValue) writeField1(oprot thrift.Protocol) (err error) { - if p.IsSetBoolVal() { - if err := oprot.WriteFieldBegin("bool_val", thrift.BOOL, 1); err != 
nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:bool_val: ", p), err) } - if err := oprot.WriteBool(bool(*p.BoolVal)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.bool_val (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:bool_val: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetInteger() { - if err := oprot.WriteFieldBegin("integer", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:integer: ", p), err) } - if err := oprot.WriteI64(int64(*p.Integer)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.integer (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:integer: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField3(oprot thrift.Protocol) (err error) { - if p.IsSetId() { - if err := oprot.WriteFieldBegin("id", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:id: ", p), err) } - if err := oprot.WriteI64(int64(*p.Id)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:id: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField4(oprot thrift.Protocol) (err error) { - if p.IsSetSinglePrecision() { - if err := oprot.WriteFieldBegin("single_precision", thrift.FLOAT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:single_precision: ", p), err) } - if err := oprot.WriteFloat(float32(*p.SinglePrecision)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.single_precision (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:single_precision: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField5(oprot thrift.Protocol) (err error) { - if p.IsSetDoublePrecision() { - if err := oprot.WriteFieldBegin("double_precision", thrift.DOUBLE, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:double_precision: ", p), err) } - if err := oprot.WriteDouble(float64(*p.DoublePrecision)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.double_precision (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:double_precision: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField6(oprot thrift.Protocol) (err error) { - if p.IsSetStr() { - if err := oprot.WriteFieldBegin("str", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:str: ", p), err) } - if err := oprot.WriteBinary(p.Str); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.str (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:str: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField7(oprot thrift.Protocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 7); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 7:timestamp: ", p), err) } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:timestamp: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField8(oprot thrift.Protocol) (err error) { - if p.IsSetYear() { - if err := oprot.WriteFieldBegin("year", thrift.I16, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:year: ", p), err) } - if err := oprot.WriteI16(int16(*p.Year)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.year (8) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:year: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField9(oprot thrift.Protocol) (err error) { - if p.IsSetMonth() { - if err := oprot.WriteFieldBegin("month", thrift.STRUCT, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:month: ", p), err) } - if err := p.Month.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Month), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:month: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField10(oprot thrift.Protocol) (err error) { - if p.IsSetDate() { - if err := oprot.WriteFieldBegin("date", thrift.STRUCT, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:date: ", p), err) } - if err := p.Date.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Date), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:date: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField11(oprot thrift.Protocol) (err error) { - if p.IsSetDatetime() { - if err := oprot.WriteFieldBegin("datetime", thrift.STRUCT, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:datetime: ", p), err) } - if err := p.Datetime.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Datetime), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:datetime: ", p), err) } - } - return err -} - -func (p *ColumnValue) writeField41(oprot thrift.Protocol) (err error) { - if p.IsSetPath() { - if err := oprot.WriteFieldBegin("path", thrift.STRUCT, 41); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 41:path: ", p), err) } - if err := p.Path.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Path), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 41:path: ", p), err) } - } - return err -} - -func (p *ColumnValue) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ColumnValue(%+v)", *p) -} - -// Attributes: -// - Columns -type RowValue struct { - Columns []*ColumnValue `thrift:"columns,1" db:"columns" json:"columns"` -} - -func NewRowValue() *RowValue { - return &RowValue{} -} - - -func (p *RowValue) 
GetColumns() []*ColumnValue { - return p.Columns -} -func (p *RowValue) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RowValue) ReadField1(iprot thrift.Protocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*ColumnValue, 0, size) - p.Columns = tSlice - for i := 0; i < size; i ++ { - _elem2 := NewColumnValue() - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Columns = append(p.Columns, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *RowValue) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("RowValue"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *RowValue) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:columns: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:columns: ", p), err) } - return err -} - -func (p *RowValue) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RowValue(%+v)", *p) -} - -// Attributes: -// - ErrorCode -// - LatencyInUs -// - ErrorMsg -// - ColumnNames -// - Rows -// - SpaceName -// - WarningMsg -type ExecutionResponse struct { - ErrorCode ErrorCode `thrift:"error_code,1,required" db:"error_code" json:"error_code"` - LatencyInUs int32 `thrift:"latency_in_us,2,required" db:"latency_in_us" json:"latency_in_us"` - ErrorMsg *string `thrift:"error_msg,3" db:"error_msg" json:"error_msg,omitempty"` - ColumnNames [][]byte `thrift:"column_names,4" db:"column_names" json:"column_names,omitempty"` - Rows []*RowValue `thrift:"rows,5" db:"rows" json:"rows,omitempty"` - SpaceName *string `thrift:"space_name,6" 
db:"space_name" json:"space_name,omitempty"` - WarningMsg *string `thrift:"warning_msg,7" db:"warning_msg" json:"warning_msg,omitempty"` -} - -func NewExecutionResponse() *ExecutionResponse { - return &ExecutionResponse{} -} - - -func (p *ExecutionResponse) GetErrorCode() ErrorCode { - return p.ErrorCode -} - -func (p *ExecutionResponse) GetLatencyInUs() int32 { - return p.LatencyInUs -} -var ExecutionResponse_ErrorMsg_DEFAULT string -func (p *ExecutionResponse) GetErrorMsg() string { - if !p.IsSetErrorMsg() { - return ExecutionResponse_ErrorMsg_DEFAULT - } -return *p.ErrorMsg -} -var ExecutionResponse_ColumnNames_DEFAULT [][]byte - -func (p *ExecutionResponse) GetColumnNames() [][]byte { - return p.ColumnNames -} -var ExecutionResponse_Rows_DEFAULT []*RowValue - -func (p *ExecutionResponse) GetRows() []*RowValue { - return p.Rows -} -var ExecutionResponse_SpaceName_DEFAULT string -func (p *ExecutionResponse) GetSpaceName() string { - if !p.IsSetSpaceName() { - return ExecutionResponse_SpaceName_DEFAULT - } -return *p.SpaceName -} -var ExecutionResponse_WarningMsg_DEFAULT string -func (p *ExecutionResponse) GetWarningMsg() string { - if !p.IsSetWarningMsg() { - return ExecutionResponse_WarningMsg_DEFAULT - } -return *p.WarningMsg -} -func (p *ExecutionResponse) IsSetErrorMsg() bool { - return p.ErrorMsg != nil -} - -func (p *ExecutionResponse) IsSetColumnNames() bool { - return p.ColumnNames != nil -} - -func (p *ExecutionResponse) IsSetRows() bool { - return p.Rows != nil -} - -func (p *ExecutionResponse) IsSetSpaceName() bool { - return p.SpaceName != nil -} - -func (p *ExecutionResponse) IsSetWarningMsg() bool { - return p.WarningMsg != nil -} - -func (p *ExecutionResponse) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetErrorCode bool = false; - var issetLatencyInUs bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - issetErrorCode = true - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - issetLatencyInUs = true - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - case 4: - if err := p.ReadField4(iprot); err != nil { - return err - } - case 5: - if err := p.ReadField5(iprot); err != nil { - return err - } - case 6: - if err := p.ReadField6(iprot); err != nil { - return err - } - case 7: - if err := p.ReadField7(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetErrorCode{ - return thrift.NewProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ErrorCode is not set")); - } - if !issetLatencyInUs{ - return thrift.NewProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field LatencyInUs is not set")); - } - return nil -} - -func (p *ExecutionResponse) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := ErrorCode(v) - 
p.ErrorCode = temp -} - return nil -} - -func (p *ExecutionResponse) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.LatencyInUs = v -} - return nil -} - -func (p *ExecutionResponse) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.ErrorMsg = &v -} - return nil -} - -func (p *ExecutionResponse) ReadField4(iprot thrift.Protocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([][]byte, 0, size) - p.ColumnNames = tSlice - for i := 0; i < size; i ++ { -var _elem3 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) -} else { - _elem3 = v -} - p.ColumnNames = append(p.ColumnNames, _elem3) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ExecutionResponse) ReadField5(iprot thrift.Protocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*RowValue, 0, size) - p.Rows = tSlice - for i := 0; i < size; i ++ { - _elem4 := NewRowValue() - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Rows = append(p.Rows, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ExecutionResponse) ReadField6(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.SpaceName = &v -} - return nil -} - -func (p *ExecutionResponse) ReadField7(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.WarningMsg = &v -} - return nil -} - -func (p *ExecutionResponse) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("ExecutionResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { return err } - if err := p.writeField6(oprot); err != nil { return err } - if err := p.writeField7(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ExecutionResponse) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("error_code", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error_code: ", p), err) } - if err := oprot.WriteI32(int32(p.ErrorCode)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error_code (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:error_code: ", p), err) } - return err -} - -func (p *ExecutionResponse) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("latency_in_us", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:latency_in_us: ", p), err) } - if err := oprot.WriteI32(int32(p.LatencyInUs)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.latency_in_us (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:latency_in_us: ", p), err) } - return err -} - -func (p *ExecutionResponse) writeField3(oprot thrift.Protocol) (err error) { - if p.IsSetErrorMsg() { - if err := oprot.WriteFieldBegin("error_msg", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:error_msg: ", p), err) } - if err := oprot.WriteString(string(*p.ErrorMsg)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error_msg (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:error_msg: ", p), err) } - } - return err -} - -func (p *ExecutionResponse) writeField4(oprot thrift.Protocol) (err error) { - if p.IsSetColumnNames() { - if err := oprot.WriteFieldBegin("column_names", thrift.LIST, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:column_names: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnNames)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.ColumnNames { - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:column_names: ", p), err) } - } - return err -} - -func (p *ExecutionResponse) writeField5(oprot thrift.Protocol) (err error) { - if p.IsSetRows() { - if err := oprot.WriteFieldBegin("rows", thrift.LIST, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:rows: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Rows)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Rows { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:rows: ", p), err) } - } - return err -} - -func (p *ExecutionResponse) writeField6(oprot thrift.Protocol) (err error) { - if p.IsSetSpaceName() { - if err := oprot.WriteFieldBegin("space_name", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:space_name: ", p), err) } - if err := oprot.WriteString(string(*p.SpaceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.space_name (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:space_name: ", p), err) } - } - return err -} - -func (p *ExecutionResponse) writeField7(oprot thrift.Protocol) (err error) { - if p.IsSetWarningMsg() { - if err := oprot.WriteFieldBegin("warning_msg", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:warning_msg: ", p), err) } - if err := oprot.WriteString(string(*p.WarningMsg)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.warning_msg (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:warning_msg: ", p), err) } - } - return err -} - -func (p *ExecutionResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ExecutionResponse(%+v)", *p) -} - -// Attributes: -// - ErrorCode -// - SessionID -// - ErrorMsg -type AuthResponse struct { - ErrorCode ErrorCode `thrift:"error_code,1,required" db:"error_code" json:"error_code"` - SessionID *int64 `thrift:"session_id,2" db:"session_id" json:"session_id,omitempty"` - ErrorMsg *string `thrift:"error_msg,3" db:"error_msg" json:"error_msg,omitempty"` -} - -func NewAuthResponse() *AuthResponse { - return &AuthResponse{} -} - - -func (p *AuthResponse) GetErrorCode() ErrorCode { - return p.ErrorCode -} -var AuthResponse_SessionID_DEFAULT int64 -func (p *AuthResponse) GetSessionID() int64 { - if !p.IsSetSessionID() { - return AuthResponse_SessionID_DEFAULT - } -return *p.SessionID -} -var AuthResponse_ErrorMsg_DEFAULT string -func (p *AuthResponse) GetErrorMsg() string { - if !p.IsSetErrorMsg() { - return AuthResponse_ErrorMsg_DEFAULT - } -return *p.ErrorMsg -} -func (p *AuthResponse) IsSetSessionID() bool { - return p.SessionID != nil -} - -func (p *AuthResponse) IsSetErrorMsg() bool { - return p.ErrorMsg != nil -} - -func (p 
*AuthResponse) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetErrorCode bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - issetErrorCode = true - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetErrorCode{ - return thrift.NewProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ErrorCode is not set")); - } - return nil -} - -func (p *AuthResponse) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := ErrorCode(v) - p.ErrorCode = temp -} - return nil -} - -func (p *AuthResponse) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.SessionID = &v -} - return nil -} - -func (p *AuthResponse) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.ErrorMsg = &v -} - return nil -} - -func (p *AuthResponse) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("AuthResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *AuthResponse) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("error_code", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error_code: ", p), err) } - if err := oprot.WriteI32(int32(p.ErrorCode)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error_code (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error_code: ", p), err) } - return err -} - -func (p *AuthResponse) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetSessionID() { - if err := oprot.WriteFieldBegin("session_id", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:session_id: ", p), err) } - if err := oprot.WriteI64(int64(*p.SessionID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.session_id (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field end error 2:session_id: ", p), err) }
-  }
-  return err
-}
-
-func (p *AuthResponse) writeField3(oprot thrift.Protocol) (err error) {
-  if p.IsSetErrorMsg() {
-    if err := oprot.WriteFieldBegin("error_msg", thrift.STRING, 3); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:error_msg: ", p), err) }
-    if err := oprot.WriteString(string(*p.ErrorMsg)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.error_msg (3) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:error_msg: ", p), err) }
-  }
-  return err
-}
-
-func (p *AuthResponse) String() string {
-  if p == nil {
-    return ""
-  }
-  return fmt.Sprintf("AuthResponse(%+v)", *p)
-}
-
diff --git a/vendor/github.com/vesoft-inc/nebula-go/nebula/ttypes.go b/vendor/github.com/vesoft-inc/nebula-go/nebula/ttypes.go
deleted file mode 100644
index 8a3bd88b..00000000
--- a/vendor/github.com/vesoft-inc/nebula-go/nebula/ttypes.go
+++ /dev/null
@@ -1,1804 +0,0 @@
-// Autogenerated by Thrift Compiler (facebook)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-// @generated
-
-package nebula
-
-import (
-  "bytes"
-  "sync"
-  "fmt"
-  thrift "github.com/facebook/fbthrift/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = sync.Mutex{}
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int;
-
-type SupportedType int64
-const (
-  SupportedType_UNKNOWN SupportedType = 0
-  SupportedType_BOOL SupportedType = 1
-  SupportedType_INT SupportedType = 2
-  SupportedType_VID SupportedType = 3
-  SupportedType_FLOAT SupportedType = 4
-  SupportedType_DOUBLE SupportedType = 5
-  SupportedType_STRING SupportedType = 6
-  SupportedType_TIMESTAMP SupportedType = 21
-  SupportedType_YEAR SupportedType = 22
-  SupportedType_YEARMONTH SupportedType = 23
-  SupportedType_DATE SupportedType = 24
-  SupportedType_DATETIME SupportedType = 25
-  SupportedType_PATH SupportedType = 41
-)
-
-var SupportedTypeToName = map[SupportedType]string {
-  SupportedType_UNKNOWN: "UNKNOWN",
-  SupportedType_BOOL: "BOOL",
-  SupportedType_INT: "INT",
-  SupportedType_VID: "VID",
-  SupportedType_FLOAT: "FLOAT",
-  SupportedType_DOUBLE: "DOUBLE",
-  SupportedType_STRING: "STRING",
-  SupportedType_TIMESTAMP: "TIMESTAMP",
-  SupportedType_YEAR: "YEAR",
-  SupportedType_YEARMONTH: "YEARMONTH",
-  SupportedType_DATE: "DATE",
-  SupportedType_DATETIME: "DATETIME",
-  SupportedType_PATH: "PATH",
-}
-
-var SupportedTypeToValue = map[string]SupportedType {
-  "UNKNOWN": SupportedType_UNKNOWN,
-  "BOOL": SupportedType_BOOL,
-  "INT": SupportedType_INT,
-  "VID": SupportedType_VID,
-  "FLOAT": SupportedType_FLOAT,
-  "DOUBLE": SupportedType_DOUBLE,
-  "STRING": SupportedType_STRING,
-  "TIMESTAMP": SupportedType_TIMESTAMP,
-  "YEAR": SupportedType_YEAR,
-  "YEARMONTH": SupportedType_YEARMONTH,
-  "DATE": SupportedType_DATE,
-  "DATETIME": SupportedType_DATETIME,
-  "PATH": SupportedType_PATH,
-}
-
-func (p SupportedType) String() string {
-  if v, ok := SupportedTypeToName[p]; ok {
-    return v
-  }
-  return ""
-}
-
-func SupportedTypeFromString(s string) (SupportedType, error) {
-  if v, ok := SupportedTypeToValue[s]; ok {
-    return v, nil
-  }
-  return SupportedType(0), fmt.Errorf("not a valid SupportedType string")
-}
-
-func SupportedTypePtr(v SupportedType) *SupportedType { return &v }
-
-//* GOD is A global senior administrator.like root of Linux systems.
-//* ADMIN is an administrator for a given Graph Space. -//* DBA is an schema administrator for a given Graph Space. -//* USER is a normal user for a given Graph Space. A User can access (read and write) the data in the Graph Space. -//* GUEST is a read-only role for a given Graph Space. A Guest cannot modify the data in the Graph Space. -//* Refer to header file src/graph/PermissionManager.h for details. -// -type RoleType int64 -const ( - RoleType_GOD RoleType = 1 - RoleType_ADMIN RoleType = 2 - RoleType_DBA RoleType = 3 - RoleType_USER RoleType = 4 - RoleType_GUEST RoleType = 5 -) - -var RoleTypeToName = map[RoleType]string { - RoleType_GOD: "GOD", - RoleType_ADMIN: "ADMIN", - RoleType_DBA: "DBA", - RoleType_USER: "USER", - RoleType_GUEST: "GUEST", -} - -var RoleTypeToValue = map[string]RoleType { - "GOD": RoleType_GOD, - "ADMIN": RoleType_ADMIN, - "DBA": RoleType_DBA, - "USER": RoleType_USER, - "GUEST": RoleType_GUEST, -} - -func (p RoleType) String() string { - if v, ok := RoleTypeToName[p]; ok { - return v - } - return "" -} - -func RoleTypeFromString(s string) (RoleType, error) { - if v, ok := RoleTypeToValue[s]; ok { - return v, nil - } - return RoleType(0), fmt.Errorf("not a valid RoleType string") -} - -func RoleTypePtr(v RoleType) *RoleType { return &v } - -type GraphSpaceID int32 - -func GraphSpaceIDPtr(v GraphSpaceID) *GraphSpaceID { return &v } - -type PartitionID int32 - -func PartitionIDPtr(v PartitionID) *PartitionID { return &v } - -type TagID int32 - -func TagIDPtr(v TagID) *TagID { return &v } - -type EdgeType int32 - -func EdgeTypePtr(v EdgeType) *EdgeType { return &v } - -type EdgeRanking int64 - -func EdgeRankingPtr(v EdgeRanking) *EdgeRanking { return &v } - -type VertexID int64 - -func VertexIDPtr(v VertexID) *VertexID { return &v } - -type IndexID int32 - -func IndexIDPtr(v IndexID) *IndexID { return &v } - -type IPv4 int32 - -func IPv4Ptr(v IPv4) *IPv4 { return &v } - -type Port int32 - -func PortPtr(v Port) *Port { return &v } - -type SchemaVer int64 - -func SchemaVerPtr(v SchemaVer) *SchemaVer { return &v } - -type ClusterID int64 - -func ClusterIDPtr(v ClusterID) *ClusterID { return &v } - -// Attributes: -// - Type -// - ValueType -// - Schema -type ValueType struct { - Type SupportedType `thrift:"type,1" db:"type" json:"type"` - ValueType *ValueType `thrift:"value_type,2" db:"value_type" json:"value_type,omitempty"` - Schema *Schema `thrift:"schema,3" db:"schema" json:"schema,omitempty"` -} - -func NewValueType() *ValueType { - return &ValueType{} -} - - -func (p *ValueType) GetType() SupportedType { - return p.Type -} -var ValueType_ValueType_DEFAULT *ValueType -func (p *ValueType) GetValueType() *ValueType { - if !p.IsSetValueType() { - return ValueType_ValueType_DEFAULT - } -return p.ValueType -} -var ValueType_Schema_DEFAULT *Schema -func (p *ValueType) GetSchema() *Schema { - if !p.IsSetSchema() { - return ValueType_Schema_DEFAULT - } -return p.Schema -} -func (p *ValueType) IsSetValueType() bool { - return p.ValueType != nil -} - -func (p *ValueType) IsSetSchema() bool { - return p.Schema != nil -} - -func (p *ValueType) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := 
p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ValueType) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := SupportedType(v) - p.Type = temp -} - return nil -} - -func (p *ValueType) ReadField2(iprot thrift.Protocol) error { - p.ValueType = NewValueType() - if err := p.ValueType.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ValueType), err) - } - return nil -} - -func (p *ValueType) ReadField3(iprot thrift.Protocol) error { - p.Schema = NewSchema() - if err := p.Schema.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Schema), err) - } - return nil -} - -func (p *ValueType) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("ValueType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ValueType) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) } - if err := oprot.WriteI32(int32(p.Type)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) } - return err -} - -func (p *ValueType) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetValueType() { - if err := oprot.WriteFieldBegin("value_type", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value_type: ", p), err) } - if err := p.ValueType.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ValueType), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value_type: ", p), err) } - } - return err -} - -func (p *ValueType) writeField3(oprot thrift.Protocol) (err error) { - if p.IsSetSchema() { - if err := oprot.WriteFieldBegin("schema", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schema: ", p), err) } - if err := p.Schema.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Schema), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schema: ", p), err) } - } - return err -} - -func (p 
*ValueType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ValueType(%+v)", *p) -} - -// Attributes: -// - IntValue -// - BoolValue -// - DoubleValue -// - StringValue -// - Timestamp -type Value struct { - IntValue *int64 `thrift:"int_value,1" db:"int_value" json:"int_value,omitempty"` - BoolValue *bool `thrift:"bool_value,2" db:"bool_value" json:"bool_value,omitempty"` - DoubleValue *float64 `thrift:"double_value,3" db:"double_value" json:"double_value,omitempty"` - StringValue *string `thrift:"string_value,4" db:"string_value" json:"string_value,omitempty"` - Timestamp *int64 `thrift:"timestamp,5" db:"timestamp" json:"timestamp,omitempty"` -} - -func NewValue() *Value { - return &Value{} -} - -var Value_IntValue_DEFAULT int64 -func (p *Value) GetIntValue() int64 { - if !p.IsSetIntValue() { - return Value_IntValue_DEFAULT - } -return *p.IntValue -} -var Value_BoolValue_DEFAULT bool -func (p *Value) GetBoolValue() bool { - if !p.IsSetBoolValue() { - return Value_BoolValue_DEFAULT - } -return *p.BoolValue -} -var Value_DoubleValue_DEFAULT float64 -func (p *Value) GetDoubleValue() float64 { - if !p.IsSetDoubleValue() { - return Value_DoubleValue_DEFAULT - } -return *p.DoubleValue -} -var Value_StringValue_DEFAULT string -func (p *Value) GetStringValue() string { - if !p.IsSetStringValue() { - return Value_StringValue_DEFAULT - } -return *p.StringValue -} -var Value_Timestamp_DEFAULT int64 -func (p *Value) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Value_Timestamp_DEFAULT - } -return *p.Timestamp -} -func (p *Value) CountSetFieldsValue() int { - count := 0 - if (p.IsSetIntValue()) { - count++ - } - if (p.IsSetBoolValue()) { - count++ - } - if (p.IsSetDoubleValue()) { - count++ - } - if (p.IsSetStringValue()) { - count++ - } - if (p.IsSetTimestamp()) { - count++ - } - return count - -} - -func (p *Value) IsSetIntValue() bool { - return p.IntValue != nil -} - -func (p *Value) IsSetBoolValue() bool { - return p.BoolValue != nil -} - -func (p *Value) IsSetDoubleValue() bool { - return p.DoubleValue != nil -} - -func (p *Value) IsSetStringValue() bool { - return p.StringValue != nil -} - -func (p *Value) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Value) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - case 4: - if err := p.ReadField4(iprot); err != nil { - return err - } - case 5: - if err := p.ReadField5(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Value) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.IntValue = &v -} - return nil -} - -func (p *Value) 
ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.BoolValue = &v -} - return nil -} - -func (p *Value) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.DoubleValue = &v -} - return nil -} - -func (p *Value) ReadField4(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.StringValue = &v -} - return nil -} - -func (p *Value) ReadField5(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.Timestamp = &v -} - return nil -} - -func (p *Value) Write(oprot thrift.Protocol) error { - if c := p.CountSetFieldsValue(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) - } - if err := oprot.WriteStructBegin("Value"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Value) writeField1(oprot thrift.Protocol) (err error) { - if p.IsSetIntValue() { - if err := oprot.WriteFieldBegin("int_value", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:int_value: ", p), err) } - if err := oprot.WriteI64(int64(*p.IntValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.int_value (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:int_value: ", p), err) } - } - return err -} - -func (p *Value) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetBoolValue() { - if err := oprot.WriteFieldBegin("bool_value", thrift.BOOL, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:bool_value: ", p), err) } - if err := oprot.WriteBool(bool(*p.BoolValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.bool_value (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:bool_value: ", p), err) } - } - return err -} - -func (p *Value) writeField3(oprot thrift.Protocol) (err error) { - if p.IsSetDoubleValue() { - if err := oprot.WriteFieldBegin("double_value", thrift.DOUBLE, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:double_value: ", p), err) } - if err := oprot.WriteDouble(float64(*p.DoubleValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.double_value (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:double_value: ", p), err) } - } - return err -} - -func (p *Value) writeField4(oprot thrift.Protocol) 
(err error) { - if p.IsSetStringValue() { - if err := oprot.WriteFieldBegin("string_value", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:string_value: ", p), err) } - if err := oprot.WriteString(string(*p.StringValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.string_value (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:string_value: ", p), err) } - } - return err -} - -func (p *Value) writeField5(oprot thrift.Protocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:timestamp: ", p), err) } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:timestamp: ", p), err) } - } - return err -} - -func (p *Value) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Value(%+v)", *p) -} - -// Attributes: -// - Name -// - Type -// - DefaultValue -type ColumnDef struct { - Name string `thrift:"name,1,required" db:"name" json:"name"` - Type *ValueType `thrift:"type,2,required" db:"type" json:"type"` - DefaultValue *Value `thrift:"default_value,3" db:"default_value" json:"default_value,omitempty"` -} - -func NewColumnDef() *ColumnDef { - return &ColumnDef{} -} - - -func (p *ColumnDef) GetName() string { - return p.Name -} -var ColumnDef_Type_DEFAULT *ValueType -func (p *ColumnDef) GetType() *ValueType { - if !p.IsSetType() { - return ColumnDef_Type_DEFAULT - } -return p.Type -} -var ColumnDef_DefaultValue_DEFAULT *Value -func (p *ColumnDef) GetDefaultValue() *Value { - if !p.IsSetDefaultValue() { - return ColumnDef_DefaultValue_DEFAULT - } -return p.DefaultValue -} -func (p *ColumnDef) IsSetType() bool { - return p.Type != nil -} - -func (p *ColumnDef) IsSetDefaultValue() bool { - return p.DefaultValue != nil -} - -func (p *ColumnDef) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetName bool = false; - var issetType bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - issetName = true - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - issetType = true - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetName{ - return thrift.NewProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set")); - } - if !issetType{ - return thrift.NewProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Type is not set")); - } - return nil -} - -func (p *ColumnDef) ReadField1(iprot thrift.Protocol) error 
{ - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Name = v -} - return nil -} - -func (p *ColumnDef) ReadField2(iprot thrift.Protocol) error { - p.Type = NewValueType() - if err := p.Type.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Type), err) - } - return nil -} - -func (p *ColumnDef) ReadField3(iprot thrift.Protocol) error { - p.DefaultValue = NewValue() - if err := p.DefaultValue.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DefaultValue), err) - } - return nil -} - -func (p *ColumnDef) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("ColumnDef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ColumnDef) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err) } - if err := oprot.WriteString(string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err) } - return err -} - -func (p *ColumnDef) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("type", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:type: ", p), err) } - if err := p.Type.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Type), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:type: ", p), err) } - return err -} - -func (p *ColumnDef) writeField3(oprot thrift.Protocol) (err error) { - if p.IsSetDefaultValue() { - if err := oprot.WriteFieldBegin("default_value", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:default_value: ", p), err) } - if err := p.DefaultValue.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DefaultValue), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:default_value: ", p), err) } - } - return err -} - -func (p *ColumnDef) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ColumnDef(%+v)", *p) -} - -// Attributes: -// - TtlDuration -// - TtlCol -type SchemaProp struct { - TtlDuration *int64 `thrift:"ttl_duration,1" db:"ttl_duration" json:"ttl_duration,omitempty"` - TtlCol *string `thrift:"ttl_col,2" db:"ttl_col" json:"ttl_col,omitempty"` -} - -func NewSchemaProp() *SchemaProp { - return &SchemaProp{} -} - -var SchemaProp_TtlDuration_DEFAULT int64 -func (p *SchemaProp) GetTtlDuration() int64 { - if !p.IsSetTtlDuration() { - return SchemaProp_TtlDuration_DEFAULT - } -return 
*p.TtlDuration -} -var SchemaProp_TtlCol_DEFAULT string -func (p *SchemaProp) GetTtlCol() string { - if !p.IsSetTtlCol() { - return SchemaProp_TtlCol_DEFAULT - } -return *p.TtlCol -} -func (p *SchemaProp) IsSetTtlDuration() bool { - return p.TtlDuration != nil -} - -func (p *SchemaProp) IsSetTtlCol() bool { - return p.TtlCol != nil -} - -func (p *SchemaProp) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SchemaProp) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.TtlDuration = &v -} - return nil -} - -func (p *SchemaProp) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.TtlCol = &v -} - return nil -} - -func (p *SchemaProp) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("SchemaProp"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SchemaProp) writeField1(oprot thrift.Protocol) (err error) { - if p.IsSetTtlDuration() { - if err := oprot.WriteFieldBegin("ttl_duration", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ttl_duration: ", p), err) } - if err := oprot.WriteI64(int64(*p.TtlDuration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ttl_duration (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ttl_duration: ", p), err) } - } - return err -} - -func (p *SchemaProp) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetTtlCol() { - if err := oprot.WriteFieldBegin("ttl_col", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ttl_col: ", p), err) } - if err := oprot.WriteString(string(*p.TtlCol)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ttl_col (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ttl_col: ", p), err) } - } - return err -} - -func (p *SchemaProp) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SchemaProp(%+v)", *p) -} - -// Attributes: -// - 
Columns -// - SchemaProp -type Schema struct { - Columns []*ColumnDef `thrift:"columns,1" db:"columns" json:"columns"` - SchemaProp *SchemaProp `thrift:"schema_prop,2" db:"schema_prop" json:"schema_prop"` -} - -func NewSchema() *Schema { - return &Schema{} -} - - -func (p *Schema) GetColumns() []*ColumnDef { - return p.Columns -} -var Schema_SchemaProp_DEFAULT *SchemaProp -func (p *Schema) GetSchemaProp() *SchemaProp { - if !p.IsSetSchemaProp() { - return Schema_SchemaProp_DEFAULT - } -return p.SchemaProp -} -func (p *Schema) IsSetSchemaProp() bool { - return p.SchemaProp != nil -} - -func (p *Schema) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Schema) ReadField1(iprot thrift.Protocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*ColumnDef, 0, size) - p.Columns = tSlice - for i := 0; i < size; i ++ { - _elem0 := NewColumnDef() - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Columns = append(p.Columns, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Schema) ReadField2(iprot thrift.Protocol) error { - p.SchemaProp = NewSchemaProp() - if err := p.SchemaProp.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SchemaProp), err) - } - return nil -} - -func (p *Schema) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("Schema"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Schema) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:columns: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err 
:= oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:columns: ", p), err) } - return err -} - -func (p *Schema) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("schema_prop", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:schema_prop: ", p), err) } - if err := p.SchemaProp.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SchemaProp), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:schema_prop: ", p), err) } - return err -} - -func (p *Schema) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Schema(%+v)", *p) -} - -// Attributes: -// - TagID -// - EdgeType -type SchemaID struct { - TagID *TagID `thrift:"tag_id,1" db:"tag_id" json:"tag_id,omitempty"` - EdgeType *EdgeType `thrift:"edge_type,2" db:"edge_type" json:"edge_type,omitempty"` -} - -func NewSchemaID() *SchemaID { - return &SchemaID{} -} - -var SchemaID_TagID_DEFAULT TagID -func (p *SchemaID) GetTagID() TagID { - if !p.IsSetTagID() { - return SchemaID_TagID_DEFAULT - } -return *p.TagID -} -var SchemaID_EdgeType_DEFAULT EdgeType -func (p *SchemaID) GetEdgeType() EdgeType { - if !p.IsSetEdgeType() { - return SchemaID_EdgeType_DEFAULT - } -return *p.EdgeType -} -func (p *SchemaID) CountSetFieldsSchemaID() int { - count := 0 - if (p.IsSetTagID()) { - count++ - } - if (p.IsSetEdgeType()) { - count++ - } - return count - -} - -func (p *SchemaID) IsSetTagID() bool { - return p.TagID != nil -} - -func (p *SchemaID) IsSetEdgeType() bool { - return p.EdgeType != nil -} - -func (p *SchemaID) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SchemaID) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := TagID(v) - p.TagID = &temp -} - return nil -} - -func (p *SchemaID) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := EdgeType(v) - p.EdgeType = &temp -} - return nil -} - -func (p *SchemaID) Write(oprot thrift.Protocol) error { - if c := p.CountSetFieldsSchemaID(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) - } - if err := oprot.WriteStructBegin("SchemaID"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := 
oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SchemaID) writeField1(oprot thrift.Protocol) (err error) { - if p.IsSetTagID() { - if err := oprot.WriteFieldBegin("tag_id", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:tag_id: ", p), err) } - if err := oprot.WriteI32(int32(*p.TagID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.tag_id (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:tag_id: ", p), err) } - } - return err -} - -func (p *SchemaID) writeField2(oprot thrift.Protocol) (err error) { - if p.IsSetEdgeType() { - if err := oprot.WriteFieldBegin("edge_type", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:edge_type: ", p), err) } - if err := oprot.WriteI32(int32(*p.EdgeType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.edge_type (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:edge_type: ", p), err) } - } - return err -} - -func (p *SchemaID) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SchemaID(%+v)", *p) -} - -// Attributes: -// - IndexID -// - IndexName -// - SchemaID -// - SchemaName -// - Fields -type IndexItem struct { - IndexID IndexID `thrift:"index_id,1" db:"index_id" json:"index_id"` - IndexName string `thrift:"index_name,2" db:"index_name" json:"index_name"` - SchemaID *SchemaID `thrift:"schema_id,3" db:"schema_id" json:"schema_id"` - SchemaName string `thrift:"schema_name,4" db:"schema_name" json:"schema_name"` - Fields []*ColumnDef `thrift:"fields,5" db:"fields" json:"fields"` -} - -func NewIndexItem() *IndexItem { - return &IndexItem{} -} - - -func (p *IndexItem) GetIndexID() IndexID { - return p.IndexID -} - -func (p *IndexItem) GetIndexName() string { - return p.IndexName -} -var IndexItem_SchemaID_DEFAULT *SchemaID -func (p *IndexItem) GetSchemaID() *SchemaID { - if !p.IsSetSchemaID() { - return IndexItem_SchemaID_DEFAULT - } -return p.SchemaID -} - -func (p *IndexItem) GetSchemaName() string { - return p.SchemaName -} - -func (p *IndexItem) GetFields() []*ColumnDef { - return p.Fields -} -func (p *IndexItem) IsSetSchemaID() bool { - return p.SchemaID != nil -} - -func (p *IndexItem) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - case 4: - if err := p.ReadField4(iprot); err != nil { - return err - } - case 5: - if err := p.ReadField5(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := 
iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *IndexItem) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := IndexID(v) - p.IndexID = temp -} - return nil -} - -func (p *IndexItem) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.IndexName = v -} - return nil -} - -func (p *IndexItem) ReadField3(iprot thrift.Protocol) error { - p.SchemaID = NewSchemaID() - if err := p.SchemaID.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SchemaID), err) - } - return nil -} - -func (p *IndexItem) ReadField4(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.SchemaName = v -} - return nil -} - -func (p *IndexItem) ReadField5(iprot thrift.Protocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*ColumnDef, 0, size) - p.Fields = tSlice - for i := 0; i < size; i ++ { - _elem1 := NewColumnDef() - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.Fields = append(p.Fields, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *IndexItem) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("IndexItem"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *IndexItem) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("index_id", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:index_id: ", p), err) } - if err := oprot.WriteI32(int32(p.IndexID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.index_id (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:index_id: ", p), err) } - return err -} - -func (p *IndexItem) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("index_name", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:index_name: ", p), err) } - if err := oprot.WriteString(string(p.IndexName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.index_name (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:index_name: ", p), err) } - return err -} - -func 
(p *IndexItem) writeField3(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("schema_id", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schema_id: ", p), err) } - if err := p.SchemaID.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SchemaID), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schema_id: ", p), err) } - return err -} - -func (p *IndexItem) writeField4(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("schema_name", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:schema_name: ", p), err) } - if err := oprot.WriteString(string(p.SchemaName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.schema_name (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:schema_name: ", p), err) } - return err -} - -func (p *IndexItem) writeField5(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("fields", thrift.LIST, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:fields: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:fields: ", p), err) } - return err -} - -func (p *IndexItem) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("IndexItem(%+v)", *p) -} - -// Attributes: -// - Ip -// - Port -type HostAddr struct { - Ip IPv4 `thrift:"ip,1" db:"ip" json:"ip"` - Port Port `thrift:"port,2" db:"port" json:"port"` -} - -func NewHostAddr() *HostAddr { - return &HostAddr{} -} - - -func (p *HostAddr) GetIp() IPv4 { - return p.Ip -} - -func (p *HostAddr) GetPort() Port { - return p.Port -} -func (p *HostAddr) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *HostAddr) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := IPv4(v) - p.Ip = temp -} - return nil -} - -func (p *HostAddr) ReadField2(iprot thrift.Protocol) error { - if v, err := 
iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := Port(v) - p.Port = temp -} - return nil -} - -func (p *HostAddr) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("HostAddr"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *HostAddr) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("ip", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ip: ", p), err) } - if err := oprot.WriteI32(int32(p.Ip)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ip (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ip: ", p), err) } - return err -} - -func (p *HostAddr) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("port", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) } - if err := oprot.WriteI32(int32(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) } - return err -} - -func (p *HostAddr) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("HostAddr(%+v)", *p) -} - -// Attributes: -// - Key -// - Value -type Pair struct { - Key string `thrift:"key,1" db:"key" json:"key"` - Value string `thrift:"value,2" db:"value" json:"value"` -} - -func NewPair() *Pair { - return &Pair{} -} - - -func (p *Pair) GetKey() string { - return p.Key -} - -func (p *Pair) GetValue() string { - return p.Value -} -func (p *Pair) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Pair) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Key = v -} - return nil -} - -func (p *Pair) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Value = v -} - return nil -} - -func (p *Pair) Write(oprot thrift.Protocol) error 
{ - if err := oprot.WriteStructBegin("Pair"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Pair) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } - return err -} - -func (p *Pair) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) } - if err := oprot.WriteString(string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) } - return err -} - -func (p *Pair) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Pair(%+v)", *p) -} - -// Attributes: -// - User -// - SpaceID -// - RoleType -type RoleItem struct { - User string `thrift:"user,1" db:"user" json:"user"` - SpaceID GraphSpaceID `thrift:"space_id,2" db:"space_id" json:"space_id"` - RoleType RoleType `thrift:"role_type,3" db:"role_type" json:"role_type"` -} - -func NewRoleItem() *RoleItem { - return &RoleItem{} -} - - -func (p *RoleItem) GetUser() string { - return p.User -} - -func (p *RoleItem) GetSpaceID() GraphSpaceID { - return p.SpaceID -} - -func (p *RoleItem) GetRoleType() RoleType { - return p.RoleType -} -func (p *RoleItem) Read(iprot thrift.Protocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - case 3: - if err := p.ReadField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RoleItem) ReadField1(iprot thrift.Protocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.User = v -} - return nil -} - -func (p *RoleItem) ReadField2(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) 
-} else { - temp := GraphSpaceID(v) - p.SpaceID = temp -} - return nil -} - -func (p *RoleItem) ReadField3(iprot thrift.Protocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := RoleType(v) - p.RoleType = temp -} - return nil -} - -func (p *RoleItem) Write(oprot thrift.Protocol) error { - if err := oprot.WriteStructBegin("RoleItem"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if err := p.writeField1(oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { return err } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *RoleItem) writeField1(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("user", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:user: ", p), err) } - if err := oprot.WriteString(string(p.User)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.user (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:user: ", p), err) } - return err -} - -func (p *RoleItem) writeField2(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("space_id", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:space_id: ", p), err) } - if err := oprot.WriteI32(int32(p.SpaceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.space_id (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:space_id: ", p), err) } - return err -} - -func (p *RoleItem) writeField3(oprot thrift.Protocol) (err error) { - if err := oprot.WriteFieldBegin("role_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:role_type: ", p), err) } - if err := oprot.WriteI32(int32(p.RoleType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.role_type (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:role_type: ", p), err) } - return err -} - -func (p *RoleItem) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RoleItem(%+v)", *p) -} - diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 9f556934..00000000 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE deleted file mode 100644 index 866d74a7..00000000 --- a/vendor/gopkg.in/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index b50c6e87..00000000 --- a/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. 
- -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index 1f7e87e6..00000000 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. 
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. 
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 53108765..00000000 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,813 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. 
-// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. 
- out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) - return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index a1c2cc52..00000000 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. 
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? 
- tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. 
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. 
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. 
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
- preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = 
false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if 
!put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e1..00000000 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - 
"regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. 
- case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod deleted file mode 100644 index 1934e876..00000000 --- a/vendor/gopkg.in/yaml.v2/go.mod +++ 
/dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v2" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 81d05dfe..00000000 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index 7c1f5fac..00000000 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 4120e0c9..00000000 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' 
// Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. 
- if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 570b8ecd..00000000 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2712 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? 
a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? 
complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . 
LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. 
- if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? 
- if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. 
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. 
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. 
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. 
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. 
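The retroactive KEY insertion performed by yaml_parser_fetch_value above is easiest to see on a one-line mapping. For the input `a: 1`, the scanner only records `a` as a possible simple key when it reads the plain scalar; no KEY token is emitted at that point. When the `:` is reached, fetch_value inserts the KEY token (and, via yaml_parser_roll_indent, the BLOCK-MAPPING-START token) back at the saved position, so the queue ends up as roughly the following sequence (a sketch of libyaml's token stream, not captured tool output):

    STREAM-START
    BLOCK-MAPPING-START   (inserted retroactively by roll_indent)
    KEY                   (inserted retroactively by fetch_value)
    SCALAR("a", plain)
    VALUE
    SCALAR("1", plain)
    BLOCK-END
    STREAM-END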
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. 
- if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. 
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. 
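	// Illustrative example: for a header such as "|+2", the '+' above set
	// chomping to +1 (keep trailing line breaks), and the digit read here sets
	// increment to 2, so the scalar body is expected two columns past the
	// enclosing indentation (indent = parser.indent + increment below).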
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. 
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
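	// Illustrative example: the escape \u263A yields code_length 4 and value
	// 0x263A; since it is <= 0xFFFF it is written below as the three UTF-8
	// bytes 0xE0+(0x263A>>12) = 0xE2, 0x80+((0x263A>>6)&0x3F) = 0x98 and
	// 0x80+(0x263A&0x3F) = 0xBA, i.e. the encoding of U+263A.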
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 4c45e660..00000000 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde608..00000000 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. 
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index de85aa4c..00000000 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,466 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. 
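From a caller's perspective, the Unmarshal behaviour documented above (lowercased field names by default, `yaml` tags overriding them, partial decoding plus a *yaml.TypeError on mismatches) looks like the following minimal sketch, assuming the usual gopkg.in/yaml.v2 import path:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type T struct {
        F int `yaml:"a,omitempty"` // filled from key "a" because of the tag
        B int                      // filled from key "b" (lowercased field name)
    }

    func main() {
        var t T
        if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
            fmt.Println(err) // a *yaml.TypeError still leaves the decodable fields set
        }
        fmt.Printf("%+v\n", t) // {F:1 B:2}
    }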
-// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decorder reads and decodes YAML values from an input stream. -type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. 
-// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
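Editor's note: getStructInfo above is also where the ",inline" option is validated, flattening an inlined struct into its parent and requiring an inlined map to have string keys so it can collect left-over keys. A hedged usage sketch against the public API; the Meta/Config types and the sample keys are hypothetical.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    type Meta struct {
    	Name    string `yaml:"name"`
    	Version string `yaml:"version"`
    }

    type Config struct {
    	Meta  `yaml:",inline"`                   // struct fields promoted to the top level
    	Extra map[string]string `yaml:",inline"` // collects any keys left over
    }

    func main() {
    	var c Config
    	doc := []byte("name: importer\nversion: v2\nregion: cn\n")
    	if err := yaml.Unmarshal(doc, &c); err != nil {
    		panic(err)
    	}
    	fmt.Println(c.Name, c.Version, c.Extra["region"]) // importer v2 cn
    }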
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index e25cee56..00000000 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,738 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. 
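Editor's note: the IsZeroer hook deleted above lets a type decide for itself whether omitempty should drop it, independently of whether every exported field happens to be zero (time.Time is the case cited in the comment). A minimal sketch of a custom implementation; the Retry/Job types are hypothetical.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    // Retry counts as empty whenever Count is zero, even if Backoff is set;
    // IsZero lets the type define that policy for the omitempty flag.
    type Retry struct {
    	Count   int    `yaml:"count"`
    	Backoff string `yaml:"backoff"`
    }

    func (r Retry) IsZero() bool { return r.Count == 0 }

    type Job struct {
    	Name  string `yaml:"name"`
    	Retry Retry  `yaml:"retry,omitempty"`
    }

    func main() {
    	out, _ := yaml.Marshal(Job{Name: "import", Retry: Retry{Backoff: "1s"}})
    	fmt.Printf("%s", out) // name: import   (retry omitted, IsZero returned true)
    }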
-const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. -) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). 
- encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
- yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. 
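Editor's note: the tag constants above (!!str, !!int, !!float, !!bool, and so on) drive how untagged plain scalars are resolved when no concrete target type is supplied. Decoding into an untyped interface{} makes the mapping visible; a sketch, assuming the usual yaml.v2 behaviour of resolving untyped mappings to map[interface{}]interface{}.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    func main() {
    	var v interface{}
    	doc := []byte("count: 3\nratio: 0.5\nenabled: true\nname: nebula\n")
    	if err := yaml.Unmarshal(doc, &v); err != nil {
    		panic(err)
    	}
    	m := v.(map[interface{}]interface{})
    	for _, k := range []string{"count", "ratio", "enabled", "name"} {
    		fmt.Printf("%s -> %T\n", k, m[k]) // int, float64, bool, string
    	}
    }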
-// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. 
-) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. 
- - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. 
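Editor's note: the emitter state machine above is what ultimately backs the public Encoder, which, as the deleted Encode documentation earlier in this file states, separates the second and later documents with "---" and writes no trailing "..." terminator. A minimal sketch of multi-document emission.

    package main

    import (
    	"bytes"
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    func main() {
    	var buf bytes.Buffer
    	enc := yaml.NewEncoder(&buf)
    	for _, doc := range []map[string]int{{"a": 1}, {"b": 2}} {
    		if err := enc.Encode(doc); err != nil {
    			panic(err)
    		}
    	}
    	if err := enc.Close(); err != nil { // flushes the emitter; no "..." terminator
    		panic(err)
    	}

    	fmt.Print(buf.String())
    	// a: 1
    	// ---
    	// b: 2
    }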
-type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c..00000000 --- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. 
- output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. 
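Editor's note: the character-class helpers above (is_printable in particular) are what push the emitter into a double-quoted, escaped scalar when a string contains non-printable bytes. That effect is observable through the public API; a sketch, with the exact escape form assumed from libyaml's \xNN convention.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    func main() {
    	out, _ := yaml.Marshal(map[string]string{
    		"plain":   "hello",
    		"control": "a\x01b", // contains a non-printable byte
    	})
    	fmt.Print(string(out))
    	// control: "a\x01b"   <- double-quoted and escaped
    	// plain: hello        <- plain scalar, no quoting needed
    }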
-func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/modules.txt b/vendor/modules.txt deleted file mode 100644 index aafd0418..00000000 --- a/vendor/modules.txt +++ /dev/null @@ -1,8 +0,0 @@ -# github.com/facebook/fbthrift v0.0.0-20190922225929-2f9839604e25 -github.com/facebook/fbthrift/thrift/lib/go/thrift -# github.com/vesoft-inc/nebula-go v1.1.0 -github.com/vesoft-inc/nebula-go -github.com/vesoft-inc/nebula-go/nebula -github.com/vesoft-inc/nebula-go/nebula/graph -# gopkg.in/yaml.v2 v2.2.4 -gopkg.in/yaml.v2