From 44c654a1e5d516ad74991d9ae261b8d2ec54a6e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Mon, 25 May 2020 17:04:48 +0200 Subject: [PATCH] Update sarama to fix Kerberos authentication for Kafka (#18711) --- NOTICE.txt | 16 +- go.mod | 8 +- go.sum | 40 +- .../consumergroup_integration_test.go | 5 + .../github.com/Shopify/sarama/.golangci.yml | 74 ++++ vendor/github.com/Shopify/sarama/.travis.yml | 38 -- vendor/github.com/Shopify/sarama/CHANGELOG.md | 69 ++++ vendor/github.com/Shopify/sarama/Makefile | 67 +--- vendor/github.com/Shopify/sarama/README.md | 2 +- vendor/github.com/Shopify/sarama/Vagrantfile | 10 +- .../Shopify/sarama/acl_create_request.go | 4 + .../Shopify/sarama/acl_create_response.go | 6 +- .../Shopify/sarama/acl_delete_request.go | 4 + .../Shopify/sarama/acl_delete_response.go | 6 +- .../Shopify/sarama/acl_describe_request.go | 4 + .../Shopify/sarama/acl_describe_response.go | 6 +- .../sarama/add_offsets_to_txn_request.go | 4 + .../sarama/add_offsets_to_txn_response.go | 4 + .../sarama/add_partitions_to_txn_request.go | 4 + .../sarama/add_partitions_to_txn_response.go | 4 + vendor/github.com/Shopify/sarama/admin.go | 356 +++++++++++++++--- .../Shopify/sarama/alter_configs_request.go | 4 + .../Shopify/sarama/alter_configs_response.go | 8 +- .../alter_partition_reassignments_request.go | 130 +++++++ .../alter_partition_reassignments_response.go | 157 ++++++++ .../Shopify/sarama/api_versions_request.go | 4 + .../Shopify/sarama/api_versions_response.go | 6 +- .../Shopify/sarama/async_producer.go | 62 ++- .../Shopify/sarama/balance_strategy.go | 32 +- vendor/github.com/Shopify/sarama/broker.go | 170 +++++++-- vendor/github.com/Shopify/sarama/client.go | 83 +++- vendor/github.com/Shopify/sarama/compress.go | 131 ++++++- vendor/github.com/Shopify/sarama/config.go | 74 +++- .../Shopify/sarama/config_resource_type.go | 26 +- vendor/github.com/Shopify/sarama/consumer.go | 15 + .../Shopify/sarama/consumer_group.go | 66 ++-- .../sarama/consumer_metadata_request.go | 4 + .../sarama/consumer_metadata_response.go | 4 + .../sarama/create_partitions_request.go | 4 + .../sarama/create_partitions_response.go | 4 + .../Shopify/sarama/create_topics_request.go | 4 + .../Shopify/sarama/create_topics_response.go | 4 + .../github.com/Shopify/sarama/decompress.go | 2 +- .../Shopify/sarama/delete_groups_request.go | 4 + .../Shopify/sarama/delete_groups_response.go | 4 + .../Shopify/sarama/delete_records_request.go | 4 + .../Shopify/sarama/delete_records_response.go | 4 + .../Shopify/sarama/delete_topics_request.go | 4 + .../Shopify/sarama/delete_topics_response.go | 4 + .../sarama/describe_configs_request.go | 4 + .../sarama/describe_configs_response.go | 9 +- .../Shopify/sarama/describe_groups_request.go | 4 + .../sarama/describe_groups_response.go | 4 + .../sarama/describe_log_dirs_request.go | 4 + .../sarama/describe_log_dirs_response.go | 10 + vendor/github.com/Shopify/sarama/dev.yml | 2 +- .../Shopify/sarama/encoder_decoder.go | 5 + .../Shopify/sarama/end_txn_request.go | 4 + .../Shopify/sarama/end_txn_response.go | 4 + vendor/github.com/Shopify/sarama/errors.go | 16 + .../Shopify/sarama/fetch_request.go | 153 +++++++- .../Shopify/sarama/fetch_response.go | 79 +++- .../sarama/find_coordinator_request.go | 4 + .../sarama/find_coordinator_response.go | 4 + vendor/github.com/Shopify/sarama/go.mod | 27 +- vendor/github.com/Shopify/sarama/go.sum | 68 ++-- .../Shopify/sarama/gssapi_kerberos.go | 5 +- .../Shopify/sarama/heartbeat_request.go | 4 + 
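For context, the Kerberos fix named in the subject is exercised through sarama's GSSAPI SASL settings (see gssapi_kerberos.go in the diffstat). A minimal sketch of a client config that drives that code path; the service name, realm, principal, and file paths below are placeholder assumptions, not values taken from this patch:

    package example

    import "github.com/Shopify/sarama"

    // newKerberosConfig sketches a sarama client config that exercises the
    // GSSAPI authentication path updated by this dependency bump. All
    // concrete values here are assumptions for illustration only.
    func newKerberosConfig() *sarama.Config {
        cfg := sarama.NewConfig()
        cfg.Net.SASL.Enable = true
        cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
        cfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH
        cfg.Net.SASL.GSSAPI.ServiceName = "kafka"         // assumed broker service principal name
        cfg.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"         // assumed realm
        cfg.Net.SASL.GSSAPI.Username = "beats"            // assumed principal
        cfg.Net.SASL.GSSAPI.KeyTabPath = "/etc/beats.keytab" // assumed path
        cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
        return cfg
    }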
.../Shopify/sarama/heartbeat_response.go | 4 + .../sarama/init_producer_id_request.go | 4 + .../sarama/init_producer_id_response.go | 4 + .../Shopify/sarama/join_group_request.go | 4 + .../Shopify/sarama/join_group_response.go | 4 + .../Shopify/sarama/leave_group_request.go | 4 + .../Shopify/sarama/leave_group_response.go | 4 + .../Shopify/sarama/list_groups_request.go | 4 + .../Shopify/sarama/list_groups_response.go | 4 + .../list_partition_reassignments_request.go | 98 +++++ .../list_partition_reassignments_response.go | 169 +++++++++ vendor/github.com/Shopify/sarama/message.go | 1 - .../Shopify/sarama/metadata_request.go | 4 + .../Shopify/sarama/metadata_response.go | 5 +- .../github.com/Shopify/sarama/mockbroker.go | 40 +- .../github.com/Shopify/sarama/mockkerberos.go | 2 +- .../Shopify/sarama/mockresponses.go | 276 +++++++++++--- .../Shopify/sarama/offset_commit_request.go | 4 + .../Shopify/sarama/offset_commit_response.go | 4 + .../Shopify/sarama/offset_fetch_request.go | 4 + .../Shopify/sarama/offset_fetch_response.go | 4 + .../Shopify/sarama/offset_manager.go | 8 +- .../Shopify/sarama/offset_request.go | 6 +- .../Shopify/sarama/offset_response.go | 4 + .../Shopify/sarama/packet_decoder.go | 6 + .../Shopify/sarama/packet_encoder.go | 7 + .../github.com/Shopify/sarama/prep_encoder.go | 49 +++ .../Shopify/sarama/produce_request.go | 6 + .../Shopify/sarama/produce_response.go | 55 ++- .../github.com/Shopify/sarama/produce_set.go | 31 +- .../github.com/Shopify/sarama/real_decoder.go | 98 ++++- .../github.com/Shopify/sarama/real_encoder.go | 52 +++ vendor/github.com/Shopify/sarama/records.go | 1 - vendor/github.com/Shopify/sarama/request.go | 30 +- .../Shopify/sarama/response_header.go | 9 +- vendor/github.com/Shopify/sarama/sarama.go | 4 + .../sarama/sasl_authenticate_request.go | 4 + .../sarama/sasl_authenticate_response.go | 4 + .../Shopify/sarama/sasl_handshake_request.go | 4 + .../Shopify/sarama/sasl_handshake_response.go | 4 + .../Shopify/sarama/sync_group_request.go | 4 + .../Shopify/sarama/sync_group_response.go | 4 + .../sarama/txn_offset_commit_request.go | 4 + .../sarama/txn_offset_commit_response.go | 4 + vendor/github.com/Shopify/sarama/utils.go | 10 +- vendor/github.com/Shopify/sarama/zstd.go | 3 +- vendor/github.com/hashicorp/go-uuid/uuid.go | 22 +- .../klauspost/compress/flate/deflate.go | 6 +- .../compress/flate/huffman_bit_writer.go | 52 ++- .../klauspost/compress/flate/huffman_code.go | 76 ++-- .../compress/flate/huffman_sortByFreq.go | 178 +++++++++ .../compress/flate/huffman_sortByLiteral.go | 201 ++++++++++ .../klauspost/compress/flate/token.go | 42 ++- .../klauspost/compress/huff0/bitwriter.go | 13 +- .../klauspost/compress/huff0/compress.go | 70 ++-- .../klauspost/compress/huff0/huff0.go | 7 +- .../klauspost/compress/zstd/README.md | 18 +- .../klauspost/compress/zstd/blockdec.go | 4 + .../klauspost/compress/zstd/blockenc.go | 45 ++- .../klauspost/compress/zstd/decoder.go | 29 ++ .../klauspost/compress/zstd/enc_dfast.go | 313 +++++++++++++++ .../klauspost/compress/zstd/enc_fast.go | 245 ++++++++++++ .../klauspost/compress/zstd/encoder.go | 75 +++- .../compress/zstd/encoder_options.go | 11 + .../klauspost/compress/zstd/snappy.go | 8 +- vendor/github.com/pierrec/lz4/README.md | 25 +- vendor/github.com/pierrec/lz4/block.go | 16 +- vendor/github.com/pierrec/lz4/decode_other.go | 10 +- vendor/github.com/pierrec/lz4/errors.go | 2 + vendor/github.com/pierrec/lz4/lz4.go | 61 ++- vendor/github.com/pierrec/lz4/reader.go | 32 +- vendor/github.com/pierrec/lz4/writer.go | 189 
++++++++-- .../rcrowley/go-metrics/.travis.yml | 17 +- .../github.com/rcrowley/go-metrics/README.md | 3 + .../github.com/rcrowley/go-metrics/debug.go | 30 +- vendor/github.com/rcrowley/go-metrics/log.go | 24 +- .../rcrowley/go-metrics/registry.go | 24 +- .../github.com/rcrowley/go-metrics/runtime.go | 130 +++---- .../jcmturner/gokrb5.v7/client/TGSExchange.go | 2 +- .../jcmturner/gokrb5.v7/client/client.go | 12 + .../jcmturner/gokrb5.v7/client/passwd.go | 2 +- .../jcmturner/gokrb5.v7/client/settings.go | 2 +- .../jcmturner/gokrb5.v7/config/hosts.go | 62 +-- .../gokrb5.v7/credentials/credentials.go | 5 + .../jcmturner/gokrb5.v7/keytab/keytab.go | 145 +++++-- .../jcmturner/gokrb5.v7/spnego/krb5Token.go | 6 + .../gokrb5.v7/spnego/negotiationToken.go | 4 - .../jcmturner/gokrb5.v7/spnego/spnego.go | 6 +- vendor/modules.txt | 14 +- 157 files changed, 4566 insertions(+), 889 deletions(-) create mode 100644 vendor/github.com/Shopify/sarama/.golangci.yml delete mode 100644 vendor/github.com/Shopify/sarama/.travis.yml create mode 100644 vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go create mode 100644 vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go create mode 100644 vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go create mode 100644 vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go diff --git a/NOTICE.txt b/NOTICE.txt index 3f62f115982..5c0fcd5e4ae 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1480,7 +1480,7 @@ SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/eapache/go-resiliency -Version: v1.1.0 +Version: v1.2.0 License type (autodetected): MIT ./vendor/github.com/eapache/go-resiliency/LICENSE: -------------------------------------------------------------------- @@ -4016,7 +4016,7 @@ Exhibit B - “Incompatible With Secondary Licenses” Notice -------------------------------------------------------------------- Dependency: github.com/hashicorp/go-uuid -Version: v1.0.1 +Version: v1.0.2 License type (autodetected): MPL-2.0 ./vendor/github.com/hashicorp/go-uuid/LICENSE: -------------------------------------------------------------------- @@ -5452,8 +5452,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/klauspost/compress -Version: v1.9.3 -Revision: c099ac9f21dd +Version: v1.9.8 License type (autodetected): BSD-3-Clause ./vendor/github.com/klauspost/compress/LICENSE: -------------------------------------------------------------------- @@ -6436,7 +6435,7 @@ See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. 
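The github.com/klauspost/compress bump recorded above (to v1.9.8) is the library behind sarama's zstd codec, which the new sarama version enables for producing (see the 1.26.0 changelog entry later in this patch). A sketch of opting a producer into it, under the assumption that the cluster runs Kafka 2.1.0 or newer:

    package example

    import "github.com/Shopify/sarama"

    // newZstdProducerConfig opts a producer into zstd compression, which
    // sarama implements via the klauspost/compress dependency bumped here.
    func newZstdProducerConfig() *sarama.Config {
        cfg := sarama.NewConfig()
        // sarama rejects zstd unless the configured version is at least
        // Kafka 2.1.0, the first release with produce request v7.
        cfg.Version = sarama.V2_1_0_0
        cfg.Producer.Compression = sarama.CompressionZSTD
        return cfg
    }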
-------------------------------------------------------------------- Dependency: github.com/pierrec/lz4 -Version: v2.2.6 +Version: v2.4.1 License type (autodetected): BSD-3-Clause ./vendor/github.com/pierrec/lz4/LICENSE: -------------------------------------------------------------------- @@ -6726,7 +6725,7 @@ See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license detai -------------------------------------------------------------------- Dependency: github.com/rcrowley/go-metrics -Revision: 3113b8401b8a +Revision: cac0b30c2563 License type (autodetected): BSD-2-Clause ./vendor/github.com/rcrowley/go-metrics/LICENSE: -------------------------------------------------------------------- @@ -7056,7 +7055,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: github.com/Shopify/sarama Overwrite: github.com/elastic/sarama -Overwrite-Revision: 355d120d0970 +Overwrite-Version: v1.24.1 +Overwrite-Revision: cbc80333a91e License type (autodetected): MIT ./vendor/github.com/Shopify/sarama/LICENSE: -------------------------------------------------------------------- @@ -8334,7 +8334,7 @@ Apache License 2.0 -------------------------------------------------------------------- Dependency: gopkg.in/jcmturner/gokrb5.v7 -Version: v7.3.0 +Version: v7.5.0 License type (autodetected): Apache-2.0 ./vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE: -------------------------------------------------------------------- diff --git a/go.mod b/go.mod index c419ee14a97..f6347cafbff 100644 --- a/go.mod +++ b/go.mod @@ -97,13 +97,11 @@ require ( github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d // indirect github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2 - github.com/jcmturner/gofork v1.0.0 // indirect github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd github.com/jpillora/backoff v1.0.0 // indirect github.com/jstemmer/go-junit-report v0.9.1 - github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01 github.com/magefile/mage v1.9.0 @@ -128,7 +126,7 @@ require ( github.com/prometheus/common v0.7.0 github.com/prometheus/procfs v0.0.11 github.com/prometheus/prometheus v2.5.0+incompatible - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a + github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 github.com/reviewdog/reviewdog v0.9.17 github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e // indirect github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54 @@ -163,7 +161,7 @@ require ( google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb google.golang.org/grpc v1.27.1 gopkg.in/inf.v0 v0.9.0 - gopkg.in/jcmturner/gokrb5.v7 v7.3.0 + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 gopkg.in/yaml.v2 v2.3.0 howett.net/plist v0.0.0-20181124034731-591f970eefbb @@ -177,7 +175,7 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible - github.com/Shopify/sarama => github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 + github.com/Shopify/sarama => github.com/elastic/sarama v1.24.1-elastic.0.20200519143807-cbc80333a91e 
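The replace directive above is the heart of this update: every github.com/Shopify/sarama import now resolves to the Elastic fork at the pinned pseudo-version. Assuming the standard module workflow (the commands are illustrative, not part of this patch), it can be reproduced with `go mod edit -replace github.com/Shopify/sarama=github.com/elastic/sarama@v1.24.1-elastic.0.20200519143807-cbc80333a91e`, followed by `go mod tidy` and `go mod vendor` to refresh go.sum and the vendor/ tree that makes up the rest of this diff.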
github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 diff --git a/go.sum b/go.sum index 31d58334250..1fc941d2e46 100644 --- a/go.sum +++ b/go.sum @@ -214,8 +214,8 @@ github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnj github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= @@ -257,8 +257,8 @@ github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUt github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elastic/gosigar v0.10.5 h1:GzPQ+78RaAb4J63unidA/JavQRKrB6s8IOzN6Ib59jo= github.com/elastic/gosigar v0.10.5/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= -github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 h1:rSo6gsz4zOanqtJ5fmZYQJvEJnA5YsVOB25casIwqUw= -github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU= +github.com/elastic/sarama v1.24.1-elastic.0.20200519143807-cbc80333a91e h1:2jm3380rkaGcosRpvtgIQrl7F5Cb99aFJYis7Y5hoJw= +github.com/elastic/sarama v1.24.1-elastic.0.20200519143807-cbc80333a91e/go.mod h1:X690XXMxlbtN8c7xcpsENKNlbj8VClCZ2hwSOhSyNmE= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -273,8 +273,8 @@ github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= -github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24 
h1:nREVDi4H8mwnNqfxFU9NMzZrDCg8TXbEatMvHozxKwU= github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -399,8 +399,8 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -420,7 +420,6 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -453,9 +452,8 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd h1:eTGTdO1ZbZ0HSC6TxDLtBl7W0fgFpGlbdPBK+IF0I0g= -github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= +github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -463,6 +461,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -550,8 +550,8 @@ github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= -github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrre/gotestcover v0.0.0-20160113212533-7b94f124d338 h1:/VAZ3an4jHXs+61iNHugNR1mG25MSpaxtMnwOJVEAQM= github.com/pierrre/gotestcover v0.0.0-20160113212533-7b94f124d338/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -586,8 +586,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd h1:fvaEkjpr2NJbtnFRCft7D6y/mQ5/2OQU0pKJLW8dwFA= github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd/go.mod h1:giYAXnpegRDPsXUO7TRpDKXJo1lFGYxyWRfEt5iQ+OA= github.com/reviewdog/reviewdog v0.9.17 h1:MKb3rlQZgkEXr3d85iqtYNITXn7gDJr2kT0IhgX/X9A= @@ -708,12 +708,12 @@ golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -789,7 +789,6 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -903,9 +902,8 @@ gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= -gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 h1:/saqWwm73dLmuzbNhe92F0QsZ/KiFND+esHco2v1hiY= diff --git a/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go b/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go index ee2ca076e23..fe9976fe966 100644 --- a/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go +++ b/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go @@ -100,6 +100,11 @@ func startConsumer(t *testing.T, host string, topic string) (io.Closer, error) { config.Net.SASL.Enable = true config.Net.SASL.User = kafkaSASLConsumerUsername config.Net.SASL.Password = kafkaSASLConsumerPassword + // The test panics unless CommitInterval is set due to the following bug in sarama: + // https://github.com/Shopify/sarama/issues/1638 + // To work around the issue we need 
to set CommitInterval, but now sarama emits + // a deprecation warning. + config.Consumer.Offsets.CommitInterval = 1 * time.Second return saramacluster.NewConsumer(brokers, "test-group", topics, config) } diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml new file mode 100644 index 00000000000..47624f3de3b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -0,0 +1,74 @@ +run: + timeout: 5m + deadline: 10m + +linters-settings: + govet: + check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 99 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + goimports: + local-prefixes: github.com/Shopify/sarama + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - wrapperFunc + - ifElseChain + funlen: + lines: 300 + statements: 300 + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + # - dupl + - errcheck + - funlen + # - gocritic + - gocyclo + - gofmt + - goimports + # - golint + - gosec + # - gosimple + - govet + # - ineffassign + - interfacer + # - misspell + # - nakedret + # - scopelint + # - staticcheck + - structcheck + # - stylecheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + # - goconst + # - gochecknoinits + +issues: + exclude: + - consider giving a name to these results + - include an explanation for nolint directive diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml deleted file mode 100644 index d60942369a3..00000000000 --- a/vendor/github.com/Shopify/sarama/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -dist: xenial -language: go -go: -- 1.11.x -- 1.12.x -- 1.13.x - -env: - global: - - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - - TOXIPROXY_ADDR=http://localhost:8474 - - KAFKA_INSTALL_ROOT=/home/travis/kafka - - KAFKA_HOSTNAME=localhost - - DEBUG=true - matrix: - - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12 - - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12 - -before_install: -- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} -- vagrant/install_cluster.sh -- vagrant/boot_cluster.sh -- vagrant/create_topics.sh -- vagrant/run_java_producer.sh - -install: make install_dependencies - -script: -- make test -- make vet -- make errcheck -- if [[ "$TRAVIS_GO_VERSION" == 1.13* ]]; then make fmt; fi - -after_success: -- go tool cover -func coverage.txt -- bash <(curl -s https://codecov.io/bash) - -after_script: vagrant/halt_cluster.sh diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index dfa7e758cc1..2bebeb106e0 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,5 +1,74 @@ # Changelog +#### Unreleased + +#### Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API 
([1590](https://github.com/Shopify/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) + +#### Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/Shopify/sarama/pull/1574), + [1582](https://github.com/Shopify/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/Shopify/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/Shopify/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/Shopify/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/Shopify/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/Shopify/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/Shopify/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/Shopify/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/Shopify/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/Shopify/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/Shopify/sarama/pull/1586)). + +#### Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/Shopify/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/Shopify/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/Shopify/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/Shopify/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/Shopify/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/Shopify/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/Shopify/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/Shopify/sarama/pull/1545)). + #### Version 1.24.1 (2019-10-31) New Features: diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index 9c8329e2f31..c3b431a562d 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -1,56 +1,27 @@ -export GO111MODULE=on +default: fmt get update test lint -default: fmt vet errcheck test lint +GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go +GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) +GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic -# Taken from https://github.com/codecov/example-go#caveat-multiple-files -.PHONY: test -test: - echo "mode: atomic" > coverage.txt - for d in `go list ./...`; do \ - go test -p 1 -v -timeout 6m -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \ - if [ -f profile.out ]; then \ - tail +2 profile.out >> coverage.txt; \ - rm profile.out; \ - fi \ - done - -GOLINT := $(shell command -v golint) +FILES := $(shell find . 
-name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') +TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') -.PHONY: lint -lint: -ifndef GOLINT - go get golang.org/x/lint/golint -endif - go list ./... | xargs golint - -.PHONY: vet -vet: - go vet ./... +get: + $(GO) get ./... + $(GO) mod verify + $(GO) mod tidy -ERRCHECK := $(shell command -v errcheck) -# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg -.PHONY: errcheck -errcheck: -ifndef ERRCHECK - go get github.com/kisielk/errcheck -endif - errcheck -ignorepkg fmt github.com/Shopify/sarama/... +update: + $(GO) get -u -v all + $(GO) mod verify + $(GO) mod tidy -.PHONY: fmt fmt: - @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi + gofmt -s -l -w $(FILES) $(TESTS) -.PHONY : install_dependencies -install_dependencies: get - -.PHONY: get -get: - go get -v ./... - -.PHONY: clean -clean: - go clean ./... +lint: + golangci-lint run -.PHONY: tidy -tidy: - go mod tidy -v +test: + $(GOTEST) ./... diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index 0206faca64f..9b7478d7c62 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -20,7 +20,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. This means we currently officially support -Go 1.11 through 1.13, and Kafka 2.1 through 2.3, although older releases are +Go 1.12 through 1.14, and Kafka 2.1 through 2.4, although older releases are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile index f4b848a301b..07d7ffb8ff4 100644 --- a/vendor/github.com/Shopify/sarama/Vagrantfile +++ b/vendor/github.com/Shopify/sarama/Vagrantfile @@ -1,14 +1,8 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
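Tying the metricbeat test change earlier in this patch to the changelog entries above: sarama 1.25 introduced explicit auto-commit settings for the consumer (#1164), and 1.26.1 restored the deprecated CommitInterval field for API compatibility (#1590), which is why the test can still set it at the cost of a deprecation warning. A sketch showing the two knobs side by side:

    package example

    import (
        "time"

        "github.com/Shopify/sarama"
    )

    // newConsumerOffsetsConfig shows both offset-commit knobs side by side.
    func newConsumerOffsetsConfig() *sarama.Config {
        cfg := sarama.NewConfig()
        // Preferred form since sarama 1.25: explicit auto-commit settings.
        cfg.Consumer.Offsets.AutoCommit.Enable = true // this is the default
        cfg.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
        // Deprecated field restored in 1.26.1; setting it, as the metricbeat
        // test does to work around Shopify/sarama#1638, still works but
        // makes sarama log a deprecation warning.
        cfg.Consumer.Offsets.CommitInterval = 1 * time.Second
        return cfg
    }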
-VAGRANTFILE_API_VERSION = "2" - # We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB MEMORY = 3072 -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "ubuntu/trusty64" +Vagrant.configure("2") do |config| + config.vm.box = "ubuntu/bionic64" config.vm.provision :shell, path: "vagrant/provision.sh" diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go index da1cdefc301..6d8a70e1a20 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_request.go +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -47,6 +47,10 @@ func (c *CreateAclsRequest) version() int16 { return c.Version } +func (c *CreateAclsRequest) headerVersion() int16 { + return 1 +} + func (c *CreateAclsRequest) requiredVersion() KafkaVersion { switch c.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go index f5a5e9a64c7..14b1b9e13f3 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_response.go +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -2,7 +2,7 @@ package sarama import "time" -//CreateAclsResponse is a an acl reponse creation type +//CreateAclsResponse is a an acl response creation type type CreateAclsResponse struct { ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse @@ -55,6 +55,10 @@ func (c *CreateAclsResponse) version() int16 { return 0 } +func (c *CreateAclsResponse) headerVersion() int16 { + return 0 +} + func (c *CreateAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go index 15908eac972..4152522598d 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_request.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -48,6 +48,10 @@ func (d *DeleteAclsRequest) version() int16 { return int16(d.Version) } +func (c *DeleteAclsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go index 65295652af4..cb630882673 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_response.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -53,7 +53,11 @@ func (d *DeleteAclsResponse) key() int16 { } func (d *DeleteAclsResponse) version() int16 { - return int16(d.Version) + return d.Version +} + +func (d *DeleteAclsResponse) headerVersion() int16 { + return 0 } func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go index 5222d46ee5b..29841a5ce33 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -25,6 +25,10 @@ func (d *DescribeAclsRequest) version() int16 { return int16(d.Version) } +func (d *DescribeAclsRequest) headerVersion() int16 { + return 1 +} + func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go index 12126e54d71..c43408b244d 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_response.go +++ 
b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -74,7 +74,11 @@ func (d *DescribeAclsResponse) key() int16 { } func (d *DescribeAclsResponse) version() int16 { - return int16(d.Version) + return d.Version +} + +func (d *DescribeAclsResponse) headerVersion() int16 { + return 0 } func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go index fc227ab8689..95586f9a1f8 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -48,6 +48,10 @@ func (a *AddOffsetsToTxnRequest) version() int16 { return 0 } +func (a *AddOffsetsToTxnRequest) headerVersion() int16 { + return 1 +} + func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go index c88c1f89f91..bdb18441993 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -40,6 +40,10 @@ func (a *AddOffsetsToTxnResponse) version() int16 { return 0 } +func (a *AddOffsetsToTxnResponse) headerVersion() int16 { + return 0 +} + func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go index 8d4b42e345b..6289f451480 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -72,6 +72,10 @@ func (a *AddPartitionsToTxnRequest) version() int16 { return 0 } +func (a *AddPartitionsToTxnRequest) headerVersion() int16 { + return 1 +} + func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go index eb4f23eca35..73b73b07f84 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -79,6 +79,10 @@ func (a *AddPartitionsToTxnResponse) version() int16 { return 0 } +func (a *AddPartitionsToTxnResponse) headerVersion() int16 { + return 0 +} + func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index 6c9b1e9e731..0430d984121 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -2,8 +2,11 @@ package sarama import ( "errors" + "fmt" "math/rand" + "strconv" "sync" + "time" ) // ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics, @@ -39,6 +42,14 @@ type ClusterAdmin interface { // new partitions. This operation is supported by brokers with version 1.0.0 or higher. CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error + // Alter the replica assignment for partitions. + // This operation is supported by brokers with version 2.4.0.0 or higher. 
+ AlterPartitionReassignments(topic string, assignment [][]int32) error + + // Provides info on ongoing partitions replica reassignments. + // This operation is supported by brokers with version 2.4.0.0 or higher. + ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) + // Delete records whose offset is smaller than the given offset of the corresponding partition. // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteRecords(topic string, partitionOffsets map[int32]int64) error @@ -90,6 +101,9 @@ type ClusterAdmin interface { // Get information about the nodes in the cluster DescribeCluster() (brokers []*Broker, controllerID int32, err error) + // Get information about all log directories on the given set of brokers + DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error) + // Close shuts down the admin and closes underlying client. Close() error } @@ -132,8 +146,45 @@ func (ca *clusterAdmin) Controller() (*Broker, error) { return ca.client.Controller() } -func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { +func (ca *clusterAdmin) refreshController() (*Broker, error) { + return ca.client.RefreshController() +} + +// isErrNoController returns `true` if the given error type unwraps to an +// `ErrNotController` response from Kafka +func isErrNoController(err error) bool { + switch e := err.(type) { + case *TopicError: + return e.Err == ErrNotController + case *TopicPartitionError: + return e.Err == ErrNotController + case KError: + return e == ErrNotController + } + return false +} + +// retryOnError will repeatedly call the given (error-returning) func in the +// case that its response is non-nil and retriable (as determined by the +// provided retriable func) up to the maximum number of tries permitted by +// the admin client configuration +func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error { + var err error + for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { + err = fn() + if err == nil || !retriable(err) { + return err + } + Logger.Printf( + "admin/request retrying after %dms... 
(%d attempts remaining)\n", + ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + time.Sleep(ca.conf.Admin.Retry.Backoff) + continue + } + return err +} +func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { if topic == "" { return ErrInvalidTopic } @@ -158,26 +209,31 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO request.Version = 2 } - b, err := ca.Controller() - if err != nil { - return err - } + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } - rsp, err := b.CreateTopics(request) - if err != nil { - return err - } + rsp, err := b.CreateTopics(request) + if err != nil { + return err + } - topicErr, ok := rsp.TopicErrors[topic] - if !ok { - return ErrIncompleteResponse - } + topicErr, ok := rsp.TopicErrors[topic] + if !ok { + return ErrIncompleteResponse + } - if topicErr.Err != ErrNoError { - return topicErr - } + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } - return nil + return nil + }) } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { @@ -214,7 +270,7 @@ func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32 Topics: []string{}, } - if ca.conf.Version.IsAtLeast(V0_11_0_0) { + if ca.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 1 } @@ -226,6 +282,16 @@ func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32 return response.Brokers, response.ControllerID, nil } +func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) { + brokers := ca.client.Brokers() + for _, b := range brokers { + if b.ID() == id { + return b, nil + } + } + return nil, fmt.Errorf("could not find broker id %d", id) +} + func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { brokers := ca.client.Brokers() if len(brokers) > 0 { @@ -283,6 +349,15 @@ func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { describeConfigsReq := &DescribeConfigsRequest{ Resources: describeConfigsResources, } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + describeConfigsReq.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + describeConfigsReq.Version = 2 + } + describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) if err != nil { return nil, err @@ -308,7 +383,6 @@ func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { } func (ca *clusterAdmin) DeleteTopic(topic string) error { - if topic == "" { return ErrInvalidTopic } @@ -322,25 +396,31 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { request.Version = 1 } - b, err := ca.Controller() - if err != nil { - return err - } + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } - rsp, err := b.DeleteTopics(request) - if err != nil { - return err - } + rsp, err := b.DeleteTopics(request) + if err != nil { + return err + } - topicErr, ok := rsp.TopicErrorCodes[topic] - if !ok { - return ErrIncompleteResponse - } + topicErr, ok := rsp.TopicErrorCodes[topic] + if !ok { + return ErrIncompleteResponse + } - if topicErr != ErrNoError { - return topicErr - } - return nil + if topicErr != ErrNoError { + if topicErr == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) } func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment 
[][]int32, validateOnly bool) error { @@ -356,30 +436,110 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ Timeout: ca.conf.Admin.Timeout, } - b, err := ca.Controller() - if err != nil { - return err + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreatePartitions(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicPartitionErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { + if topic == "" { + return ErrInvalidTopic } - rsp, err := b.CreatePartitions(request) - if err != nil { - return err + request := &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), } - topicErr, ok := rsp.TopicPartitionErrors[topic] - if !ok { - return ErrIncompleteResponse + for i := 0; i < len(assignment); i++ { + request.AddBlock(topic, int32(i), assignment[i]) } - if topicErr.Err != ErrNoError { - return topicErr + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + errs := make([]error, 0) + + rsp, err := b.AlterPartitionReassignments(request) + + if err != nil { + errs = append(errs, err) + } else { + if rsp.ErrorCode > 0 { + errs = append(errs, errors.New(rsp.ErrorCode.Error())) + } + + for topic, topicErrors := range rsp.Errors { + for partition, partitionError := range topicErrors { + if partitionError.errorCode != ErrNoError { + errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) + errs = append(errs, errors.New(errStr)) + } + } + } + } + + if len(errs) > 0 { + return ErrReassignPartitions{MultiError{&errs}} + } + + return nil + }) +} + +func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { + if topic == "" { + return nil, ErrInvalidTopic } - return nil + request := &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + request.AddBlock(topic, partitions) + + b, err := ca.Controller() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + rsp, err := b.ListPartitionReassignments(request) + + if err == nil && rsp != nil { + return rsp.TopicStatus, nil + } else { + return nil, err + } } func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { - if topic == "" { return ErrInvalidTopic } @@ -432,8 +592,14 @@ func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]i return nil } -func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { +// Returns a bool indicating whether the resource request needs to go to a +// specific broker +func dependsOnSpecificNode(resource ConfigResource) bool { + return (resource.Type == BrokerResource && resource.Name != "") || + resource.Type == BrokerLoggerResource +} +func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { var entries []ConfigEntry var resources []*ConfigResource resources = append(resources, &resource) @@ -442,11 +608,31 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, Resources: resources, 
} - b, err := ca.Controller() + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } + + var ( + b *Broker + err error + ) + + // DescribeConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(resource) { + id, _ := strconv.Atoi(resource.Name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } if err != nil { return nil, err } + _ = b.Open(ca.client.Config()) rsp, err := b.DescribeConfigs(request) if err != nil { return nil, err @@ -457,6 +643,9 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, if rspResource.ErrorMsg != "" { return nil, errors.New(rspResource.ErrorMsg) } + if rspResource.ErrorCode != 0 { + return nil, KError(rspResource.ErrorCode) + } for _, cfgEntry := range rspResource.Configs { entries = append(entries, *cfgEntry) } @@ -466,7 +655,6 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, } func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error { - var resources []*AlterConfigsResource resources = append(resources, &AlterConfigsResource{ Type: resourceType, @@ -479,11 +667,23 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string ValidateOnly: validateOnly, } - b, err := ca.Controller() + var ( + b *Broker + err error + ) + + // AlterConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + id, _ := strconv.Atoi(name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } if err != nil { return err } + _ = b.Open(ca.client.Config()) rsp, err := b.AlterConfigs(request) if err != nil { return err @@ -494,6 +694,9 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string if rspResource.ErrorMsg != "" { return errors.New(rspResource.ErrorMsg) } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } } } return nil @@ -518,7 +721,6 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { } func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { - request := &DescribeAclsRequest{AclFilter: filter} if ca.conf.Version.IsAtLeast(V2_0_0_0) { @@ -566,7 +768,6 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi for _, mACL := range fr.MatchingAcls { mAcls = append(mAcls, *mACL) } - } return mAcls, nil } @@ -580,7 +781,6 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group return nil, err } groupsPerBroker[controller] = append(groupsPerBroker[controller], group) - } for broker, brokerGroups := range groupsPerBroker { @@ -623,7 +823,6 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e } groupMaps <- groups - }(b, ca.conf) } @@ -688,3 +887,48 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { return nil } + +func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { + allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata) + + // Query brokers in parallel, since we may have to query multiple brokers + logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds)) + errors := make(chan error, len(brokerIds)) + wg := sync.WaitGroup{} + + for _, b := 
range brokerIds { + wg.Add(1) + broker, err := ca.findBroker(b) + if err != nil { + Logger.Printf("Unable to find broker with ID = %v\n", b) + continue + } + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{}) + if err != nil { + errors <- err + return + } + logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata) + logDirs[b.ID()] = response.LogDirs + logDirsMaps <- logDirs + }(broker, ca.conf) + } + + wg.Wait() + close(logDirsMaps) + close(errors) + + for logDirsMap := range logDirsMaps { + for id, logDirs := range logDirsMap { + allLogDirs[id] = logDirs + } + } + + // Intentionally return only the first error for simplicity + err = <-errors + return +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go index 26c275b83d3..c88bb604a43 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_request.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -117,6 +117,10 @@ func (a *AlterConfigsRequest) version() int16 { return 0 } +func (a *AlterConfigsRequest) headerVersion() int16 { + return 1 +} + func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go index 3893663cfe9..3266f927406 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -2,13 +2,13 @@ package sarama import "time" -//AlterConfigsResponse is a reponse type for alter config +//AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } -//AlterConfigsResourceResponse is a reponse type for alter config resource +//AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 ErrorMsg string @@ -92,6 +92,10 @@ func (a *AlterConfigsResponse) version() int16 { return 0 } +func (a *AlterConfigsResponse) headerVersion() int16 { + return 0 +} + func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go new file mode 100644 index 00000000000..f0a2f9dd59b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go @@ -0,0 +1,130 @@ +package sarama + +type alterPartitionReassignmentsBlock struct { + replicas []int32 +} + +func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error { + if err := pe.putNullableCompactInt32Array(b.replicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) { + if b.replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + return nil +} + +type AlterPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string]map[int32]*alterPartitionReassignmentsBlock + Version int16 +} + +func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := 
pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *AlterPartitionReassignmentsRequest) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + } + + r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go new file mode 100644 index 00000000000..b3f9a15fe7f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go @@ -0,0 +1,157 @@ +package sarama + +type alterPartitionReassignmentsErrorBlock struct { + errorCode KError + errorMessage *string +} + +func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { + pe.putInt16(int16(b.errorCode)) + if err := pe.putNullableCompactString(b.errorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { + errorCode, err := pd.getInt16() + if err != nil { + return err + } + b.errorCode = KError(errorCode) + b.errorMessage, err = pd.getCompactNullableString() + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return err +} + +type AlterPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock +} + +func (r *AlterPartitionReassignmentsResponse) AddError(topic string, 
partition int32, kerror KError, message *string) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) + r.Errors[topic] = partitions + } + + partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} +} + +func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Errors)) + for topic, partitions := range r.Errors { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsErrorBlock{} + if err := block.decode(pd); err != nil { + return err + } + + r.Errors[topic][partition] = block + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *AlterPartitionReassignmentsResponse) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go index b33167c0b1b..d67c5e1e538 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -20,6 +20,10 @@ func (a *ApiVersionsRequest) version() int16 { return 0 } +func (a *ApiVersionsRequest) headerVersion() int16 { + return 1 +} + func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go index bb1f0b31ab7..d09e8d9e153 100644 --- 
a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -1,6 +1,6 @@ package sarama -//ApiVersionsResponseBlock is an api version reponse block type +//ApiVersionsResponseBlock is an api version response block type type ApiVersionsResponseBlock struct { ApiKey int16 MinVersion int16 @@ -84,6 +84,10 @@ func (r *ApiVersionsResponse) version() int16 { return 0 } +func (a *ApiVersionsResponse) headerVersion() int16 { + return 0 +} + func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index 9b15cd1920b..d0ce01b66e7 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -60,13 +60,28 @@ const ( noProducerEpoch = -1 ) -func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 { +func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { key := fmt.Sprintf("%s-%d", topic, partition) t.mutex.Lock() defer t.mutex.Unlock() sequence := t.sequenceNumbers[key] t.sequenceNumbers[key] = sequence + 1 - return sequence + return sequence, t.producerEpoch +} + +func (t *transactionManager) bumpEpoch() { + t.mutex.Lock() + defer t.mutex.Unlock() + t.producerEpoch++ + for k := range t.sequenceNumbers { + t.sequenceNumbers[k] = 0 + } +} + +func (t *transactionManager) getProducerID() (int64, int16) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.producerID, t.producerEpoch } func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { @@ -208,6 +223,8 @@ type ProducerMessage struct { flags flagSet expectation chan *ProducerError sequenceNumber int32 + producerEpoch int16 + hasSequence bool } const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. @@ -234,6 +251,9 @@ func (m *ProducerMessage) byteSize(version int) int { func (m *ProducerMessage) clear() { m.flags = 0 m.retries = 0 + m.sequenceNumber = 0 + m.producerEpoch = 0 + m.hasSequence = false } // ProducerError is the type of error generated when the producer fails to deliver a message. @@ -388,10 +408,6 @@ func (tp *topicProducer) dispatch() { continue } } - // All messages being retried (sent or not) have already had their retry count updated - if tp.parent.conf.Producer.Idempotent && msg.retries == 0 { - msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition) - } handler := tp.handlers[msg.Partition] if handler == nil { @@ -411,7 +427,7 @@ func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { var partitions []int32 err := tp.breaker.Run(func() (err error) { - var requiresConsistency = false + requiresConsistency := false if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok { requiresConsistency = ep.MessageRequiresConsistency(msg) } else { @@ -570,6 +586,15 @@ func (pp *partitionProducer) dispatch() { Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) } + // Now that we know we have a broker to actually try and send this message to, generate the sequence + // number for it. + // All messages being retried (sent or not) have already had their retry count updated + // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer. 
+ if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 { + msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition) + msg.hasSequence = true + } + pp.brokerProducer.input <- msg } } @@ -748,12 +773,21 @@ func (bp *brokerProducer) run() { } if bp.buffer.wouldOverflow(msg) { - if err := bp.waitForSpace(msg); err != nil { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, false); err != nil { bp.parent.retryMessage(msg, err) continue } } + if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch { + // The epoch was reset, need to roll the buffer over + Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, true); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } if err := bp.buffer.add(msg); err != nil { bp.parent.returnError(msg, err) continue @@ -809,9 +843,7 @@ func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { return bp.currentRetries[msg.Topic][msg.Partition] } -func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { - Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) - +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error { for { select { case response := <-bp.responses: @@ -819,7 +851,7 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { // handling a response can change our state, so re-check some things if reason := bp.needsRetry(msg); reason != nil { return reason - } else if !bp.buffer.wouldOverflow(msg) { + } else if !bp.buffer.wouldOverflow(msg) && !forceRollover { return nil } case bp.output <- bp.buffer: @@ -1030,6 +1062,12 @@ func (p *asyncProducer) shutdown() { } func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + // We need to reset the producer ID epoch if we set a sequence number on it, because the broker + // will never see a message with this number, so we can never continue the sequence. + if msg.hasSequence { + Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition) + p.txnmgr.bumpEpoch() + } msg.clear() pErr := &ProducerError{Msg: msg, Err: err} if p.conf.Producer.Return.Errors { diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go index 67c4d96d042..d9789a0264b 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -47,6 +47,10 @@ type BalanceStrategy interface { // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions` // and returns a distribution plan. 
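The sequence/epoch bookkeeping above only takes effect for idempotent producers. A minimal sketch of a configuration that exercises this path (the broker address is a placeholder); sarama's Validate also enforces the acks and in-flight constraints shown:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // idempotence needs Kafka >= 0.11
	cfg.Producer.Idempotent = true // enables the sequence/epoch path above
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Retry.Max = 5 // must be >= 1 in idempotent mode
	cfg.Net.MaxOpenRequests = 1 // keeps sequence numbers strictly ordered

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = producer.Close() }()
}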
Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) + + // AssignmentData returns the serialized assignment data for the specified + // memberID + AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) } // -------------------------------------------------------------------- @@ -132,6 +136,11 @@ func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, t return plan, nil } +// AssignmentData simple strategies do not require any shared assignment data +func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return nil, nil +} + type balanceStrategySortable struct { topic string memberIDs []string @@ -258,7 +267,7 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad plan := make(BalanceStrategyPlan, len(currentAssignment)) for memberID, assignments := range currentAssignment { if len(assignments) == 0 { - plan[memberID] = make(map[string][]int32, 0) + plan[memberID] = make(map[string][]int32) } else { for _, assignment := range assignments { plan.Add(memberID, assignment.Topic, assignment.Partition) @@ -268,6 +277,15 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad return plan, nil } +// AssignmentData serializes the set of topics currently assigned to the +// specified member as part of the supplied balance plan +func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return encode(&StickyAssignorUserDataV1{ + Topics: topics, + Generation: generationID, + }, nil) +} + func strsContains(s []string, value string) bool { for _, entry := range s { if entry == value { @@ -671,14 +689,6 @@ func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumer return sortedPartionIDs } -func deepCopyPartitions(src []topicPartitionAssignment) []topicPartitionAssignment { - dst := make([]topicPartitionAssignment, len(src)) - for i, partition := range src { - dst[i] = partition - } - return dst -} - func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment { copy := make(map[string][]topicPartitionAssignment, len(assignment)) for memberID, subscriptions := range assignment { @@ -938,9 +948,7 @@ func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { for i := 0; i < len(cycle)-1; i++ { superCycle[i] = cycle[i] } - for _, c := range cycle { - superCycle = append(superCycle, c) - } + superCycle = append(superCycle, cycle...) 
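One consequence of the interface change above: AssignmentData is now part of the exported BalanceStrategy interface, so any custom strategy defined outside this package must add the method. A sketch of the minimal implementation for a strategy with no shared per-member state (the type name is illustrative):

// AssignmentData satisfies the new BalanceStrategy method. Strategies that
// carry no shared state can return nil, exactly as the built-in simple
// strategies do; the sticky strategy instead serializes its assigned topics
// and the generation ID, as shown above.
func (s *myCustomStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
	return nil, nil
}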
for _, foundCycle := range cycles { if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 { return true diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index 6a7dbeee12c..938b7eca36b 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -40,6 +40,7 @@ type Broker struct { outgoingByteRate metrics.Meter responseRate metrics.Meter responseSize metrics.Histogram + requestsInFlight metrics.Counter brokerIncomingByteRate metrics.Meter brokerRequestRate metrics.Meter brokerRequestSize metrics.Histogram @@ -47,6 +48,7 @@ type Broker struct { brokerOutgoingByteRate metrics.Meter brokerResponseRate metrics.Meter brokerResponseSize metrics.Histogram + brokerRequestsInFlight metrics.Counter kerberosAuthenticator GSSAPIKerberosAuth } @@ -71,7 +73,7 @@ const ( // server negotiate SASL by wrapping tokens with Kafka protocol headers. SASLHandshakeV1 = int16(1) // SASLExtKeyAuth is the reserved extension key name sent as part of the - // SASL/OAUTHBEARER intial client response + // SASL/OAUTHBEARER initial client response SASLExtKeyAuth = "auth" ) @@ -117,6 +119,7 @@ type SCRAMClient interface { type responsePromise struct { requestTime time.Time correlationID int32 + headerVersion int16 packets chan []byte errors chan error } @@ -151,25 +154,35 @@ func (b *Broker) Open(conf *Config) error { go withRecover(func() { defer b.lock.Unlock() - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else if conf.Net.Proxy.Enable { - b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } + dialer := conf.getDialer() + b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) b.conn = nil atomic.StoreInt32(&b.opened, 0) return } + + if conf.Net.TLS.Enable { + Logger.Printf("Using tls") + cfg := conf.Net.TLS.Config + if cfg == nil { + cfg = &tls.Config{} + } + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + // Gets the hostname as tls.DialWithDialer does it. 
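The dial-then-wrap ordering above is what makes TLS usable through a proxy: the configured dialer (possibly a SOCKS proxy) produces the raw connection first, and tls.Client is layered on top afterwards. A sketch of a config exercising both, assuming a SOCKS5 proxy at 127.0.0.1:1080 (a placeholder):

package kafkaclient

import (
	"crypto/tls"
	"log"

	"github.com/Shopify/sarama"
	"golang.org/x/net/proxy"
)

func newProxiedTLSConfig() *sarama.Config {
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	cfg := sarama.NewConfig()
	cfg.Net.Proxy.Enable = true
	cfg.Net.Proxy.Dialer = dialer      // dials the raw TCP connection
	cfg.Net.TLS.Enable = true          // tls.Client wraps the dialed conn
	cfg.Net.TLS.Config = &tls.Config{} // ServerName inferred from broker addr if empty
	return cfg
}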
+ if cfg.ServerName == "" { + colonPos := strings.LastIndex(b.addr, ":") + if colonPos == -1 { + colonPos = len(b.addr) + } + hostname := b.addr[:colonPos] + cfg.ServerName = hostname + } + b.conn = tls.Client(b.conn, cfg) + } + b.conn = newBufConn(b.conn) b.conf = conf @@ -182,6 +195,7 @@ func (b *Broker) Open(conf *Config) error { b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above if b.id >= 0 { @@ -189,7 +203,6 @@ func (b *Broker) Open(conf *Config) error { } if conf.Net.SASL.Enable { - b.connErr = b.authenticateViaSASL() if b.connErr != nil { @@ -366,7 +379,7 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } -//CommitOffset return an Offset commit reponse or error +//CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) @@ -528,6 +541,32 @@ func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePart return response, nil } +//AlterPartitionReassignments sends a alter partition reassignments request and +//returns alter partition reassignments response +func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) { + response := new(AlterPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//ListPartitionReassignments sends a list partition reassignments request and +//returns list partition reassignments response +func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) { + response := new(ListPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + //DeleteRecords send a request to delete records and return delete record //response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { @@ -708,7 +747,7 @@ func (b *Broker) write(buf []byte) (n int, err error) { return b.conn.Write(buf) } -func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { +func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { b.lock.Lock() defer b.lock.Unlock() @@ -730,27 +769,35 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, } requestTime := time.Now() + // Will be decremented in responseReceiver (except error or request with NoResponse) + b.addRequestInFlightMetrics(1) bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { + b.addRequestInFlightMetrics(-1) return nil, err } b.correlationID++ if !promiseResponse { // Record request latency without the response - b.updateRequestLatencyMetrics(time.Since(requestTime)) + b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) return nil, nil } - promise := 
responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} + promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} b.responses <- promise return &promise, nil } -func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { - promise, err := b.send(req, res != nil) +func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + responseHeaderVersion := int16(-1) + if res != nil { + responseHeaderVersion = res.headerVersion() + } + + promise, err := b.send(req, res != nil, responseHeaderVersion) if err != nil { return err } @@ -830,14 +877,19 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { func (b *Broker) responseReceiver() { var dead error - header := make([]byte, 8) for response := range b.responses { if dead != nil { + // This was previously incremented in send() and + // we are not calling updateIncomingCommunicationMetrics() + b.addRequestInFlightMetrics(-1) response.errors <- dead continue } + var headerLength = getHeaderLength(response.headerVersion) + header := make([]byte, headerLength) + bytesReadHeader, err := b.readFull(header) requestLatency := time.Since(response.requestTime) if err != nil { @@ -848,7 +900,7 @@ func (b *Broker) responseReceiver() { } decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) + err = versionedDecode(header, &decodedHeader, response.headerVersion) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err @@ -864,7 +916,7 @@ func (b *Broker) responseReceiver() { continue } - buf := make([]byte, decodedHeader.length-4) + buf := make([]byte, decodedHeader.length-int32(headerLength)+4) bytesReadBody, err := b.readFull(buf) b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { @@ -878,6 +930,15 @@ func (b *Broker) responseReceiver() { close(b.done) } +func getHeaderLength(headerVersion int16) int8 { + if headerVersion < 1 { + return 8 + } else { + // header contains additional tagged field length (0), we don't support actual tags yet. 
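// Worked sizes for the two header formats handled here (illustrative
// restatement, not upstream code):
//   v0: 4-byte length + 4-byte correlation id                     = 8 bytes
//   v1: 4-byte length + 4-byte correlation id + 1-byte tag buffer = 9 bytes
// The length field counts everything after itself, which is why the body
// read above is decodedHeader.length - int32(headerLength) + 4
// (for v0: length - 8 + 4 = length - 4, the pre-change behaviour).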
+ return 9 + } +} + func (b *Broker) authenticateViaSASL() error { switch b.conf.Net.SASL.Mechanism { case SASLTypeOAuth: @@ -909,9 +970,12 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int } requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) return err } @@ -920,6 +984,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int header := make([]byte, 8) // response header _, err = b.readFull(header) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) return err } @@ -928,6 +993,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int payload := make([]byte, length-4) n, err := b.readFull(payload) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) return err } @@ -975,10 +1041,9 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int // When credentials are invalid, Kafka replies with a SaslAuthenticate response // containing an error code and message detailing the authentication failure. func (b *Broker) sendAndReceiveSASLPlainAuth() error { - // default to V0 to allow for backward compatability when SASL is enabled + // default to V0 to allow for backward compatibility when SASL is enabled // but not the handshake if b.conf.Net.SASL.Handshake { - handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) @@ -994,16 +1059,18 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { // sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { - - length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) authBytes := make([]byte, length+4) //4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) - copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) + copy(authBytes[4:], []byte(b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) bytesWritten, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } @@ -1028,11 +1095,13 @@ func (b *Broker) sendAndReceiveV1SASLPlainAuth() error { requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID) - b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } @@ -1085,16 +1154,18 @@ 
func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { // if the broker responds with a challenge, in which case the token is // rejected. func (b *Broker) sendClientMessage(message []byte) (bool, error) { - requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) correlationID := b.correlationID bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { + b.addRequestInFlightMetrics(-1) return false, err } - b.updateOutgoingCommunicationMetrics(bytesWritten) b.correlationID++ res := &SaslAuthenticateResponse{} @@ -1125,22 +1196,25 @@ func (b *Broker) sendAndReceiveSASLSCRAMv1() error { msg, err := scramClient.Step("") if err != nil { return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) - } for !scramClient.Done() { requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) correlationID := b.correlationID bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) + b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } - b.updateOutgoingCommunicationMetrics(bytesWritten) b.correlationID++ challenge, err := b.receiveSaslAuthenticateResponse(correlationID) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) return err } @@ -1176,7 +1250,7 @@ func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, e } header := responseHeader{} - err = decode(buf, &header) + err = versionedDecode(buf, &header, 0) if err != nil { return nil, err } @@ -1233,7 +1307,7 @@ func mapToString(extensions map[string]string, keyValSep string, elemSep string) } func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { - authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) rb := &SaslAuthenticateRequest{authBytes} req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} buf, err := encode(req, b.conf.MetricRegistry) @@ -1245,7 +1319,6 @@ func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, erro } func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { - rb := &SaslAuthenticateRequest{initialResp} req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} @@ -1266,7 +1339,7 @@ func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correl } header := responseHeader{} - err = decode(buf, &header) + err = versionedDecode(buf, &header, 0) if err != nil { return bytesRead, err } @@ -1294,7 +1367,7 @@ func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correl } func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { - b.updateRequestLatencyMetrics(requestLatency) + b.updateRequestLatencyAndInFlightMetrics(requestLatency) b.responseRate.Mark(1) if b.brokerResponseRate != nil { @@ -1313,7 +1386,7 @@ func (b *Broker) updateIncomingCommunicationMetrics(bytes int, 
requestLatency ti } } -func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { +func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) { requestLatencyInMs := int64(requestLatency / time.Millisecond) b.requestLatency.Update(requestLatencyInMs) @@ -1321,6 +1394,14 @@ func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { b.brokerRequestLatency.Update(requestLatencyInMs) } + b.addRequestInFlightMetrics(-1) +} + +func (b *Broker) addRequestInFlightMetrics(i int64) { + b.requestsInFlight.Inc(i) + if b.brokerRequestsInFlight != nil { + b.brokerRequestsInFlight.Inc(i) + } } func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { @@ -1339,7 +1420,6 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { if b.brokerRequestSize != nil { b.brokerRequestSize.Update(requestSize) } - } func (b *Broker) registerMetrics() { @@ -1350,12 +1430,14 @@ func (b *Broker) registerMetrics() { b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") b.brokerResponseRate = b.registerMeter("response-rate") b.brokerResponseSize = b.registerHistogram("response-size") + b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") } func (b *Broker) unregisterMetrics() { for _, name := range b.registeredMetrics { b.conf.MetricRegistry.Unregister(name) } + b.registeredMetrics = nil } func (b *Broker) registerMeter(name string) metrics.Meter { @@ -1369,3 +1451,9 @@ func (b *Broker) registerHistogram(name string) metrics.Histogram { b.registeredMetrics = append(b.registeredMetrics, nameForBroker) return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) } + +func (b *Broker) registerCounter(name string) metrics.Counter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) +} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index 040cfe9e395..c3392f961e8 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -17,9 +17,15 @@ type Client interface { // altered after it has been created. Config() *Config - // Controller returns the cluster controller broker. Requires Kafka 0.10 or higher. + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. You can call RefreshController + // to update the cached value. Requires Kafka 0.10 or higher. Controller() (*Broker, error) + // RefreshController retrieves the cluster controller from fresh metadata + // and stores it in the local cache. Requires Kafka 0.10 or higher. + RefreshController() (*Broker, error) + // Brokers returns the current set of active brokers as retrieved from cluster metadata. 
Brokers() []*Broker @@ -193,7 +199,6 @@ func (client *client) Brokers() []*Broker { func (client *client) InitProducerID() (*InitProducerIDResponse, error) { var err error for broker := client.any(); broker != nil; broker = client.any() { - req := &InitProducerIDRequest{} response, err := broker.InitProducerID(req) @@ -242,6 +247,9 @@ func (client *client) Close() error { } func (client *client) Closed() bool { + client.lock.RLock() + defer client.lock.RUnlock() + return client.brokers == nil } @@ -484,6 +492,35 @@ func (client *client) Controller() (*Broker, error) { return controller, nil } +// deregisterController removes the cached controllerID +func (client *client) deregisterController() { + client.lock.Lock() + defer client.lock.Unlock() + delete(client.brokers, client.controllerID) +} + +// RefreshController retrieves the cluster controller from fresh metadata +// and stores it in the local cache. Requires Kafka 0.10 or higher. +func (client *client) RefreshController() (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.deregisterController() + + if err := client.refreshMetadata(); err != nil { + return nil, err + } + + controller := client.cachedController() + if controller == nil { + return nil, ErrControllerNotAvailable + } + + _ = controller.Open(client.conf) + return controller, nil +} + func (client *client) Coordinator(consumerGroup string) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient @@ -525,10 +562,39 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { // private broker management helpers +func (client *client) updateBroker(brokers []*Broker) { + var currentBroker = make(map[int32]*Broker, len(brokers)) + + for _, broker := range brokers { + currentBroker[broker.ID()] = broker + if client.brokers[broker.ID()] == nil { // add new broker + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } + } + + for id, broker := range client.brokers { + if _, exist := currentBroker[id]; !exist { // remove old broker + safeAsyncClose(broker) + delete(client.brokers, id) + Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr()) + } + } +} + // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered // in the brokers map. It returns the broker that is registered, which may be the provided broker, // or a previously registered Broker instance. You must hold the write lock before calling this function. func (client *client) registerBroker(broker *Broker) { + if client.brokers == nil { + Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr()) + return + } + if client.brokers[broker.ID()] == nil { client.brokers[broker.ID()] = broker Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) @@ -756,7 +822,7 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") return err } - Logger.Printf("client/metadata retrying after %dms... 
(%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) if backoff > 0 { time.Sleep(backoff) } @@ -822,7 +888,7 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, } if broker != nil { - Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) + Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) return retry(ErrOutOfBrokers) } @@ -833,16 +899,19 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) { + if client.Closed() { + return + } + client.lock.Lock() defer client.lock.Unlock() // For all the brokers we received: // - if it is a new ID, save it // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - if some brokers is not exist in it, remove old broker // - otherwise ignore it, replacing our existing one would just bounce the connection - for _, broker := range data.Brokers { - client.registerBroker(broker) - } + client.updateBroker(data.Brokers) client.controllerID = data.ControllerID diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go index 9247c3553c7..12cd7c3d510 100644 --- a/vendor/github.com/Shopify/sarama/compress.go +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -6,7 +6,7 @@ import ( "fmt" "sync" - "github.com/eapache/go-xerial-snappy" + snappy "github.com/eapache/go-xerial-snappy" "github.com/pierrec/lz4" ) @@ -22,6 +22,87 @@ var ( return gzip.NewWriter(nil) }, } + gzipWriterPoolForCompressionLevel1 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 1) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel2 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 2) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel3 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 3) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel4 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 4) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel5 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 5) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel6 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 6) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel7 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 7) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel8 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 8) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel9 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 9) + if err != nil { + panic(err) + } + return gz 
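// Note (illustrative, not upstream code): keeping one sync.Pool per
// compression level lets gzip.Writer reuse stay allocation-free for every
// level a producer may set via Producer.CompressionLevel (1-9 or
// CompressionLevelDefault); any other level still falls through to
// gzip.NewWriterLevel in the default case below.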
+ }, + } ) func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { @@ -34,15 +115,53 @@ func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { buf bytes.Buffer writer *gzip.Writer ) - if level != CompressionLevelDefault { + + switch level { + case CompressionLevelDefault: + writer = gzipWriterPool.Get().(*gzip.Writer) + defer gzipWriterPool.Put(writer) + writer.Reset(&buf) + case 1: + writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel1.Put(writer) + writer.Reset(&buf) + case 2: + writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel2.Put(writer) + writer.Reset(&buf) + case 3: + writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel3.Put(writer) + writer.Reset(&buf) + case 4: + writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel4.Put(writer) + writer.Reset(&buf) + case 5: + writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel5.Put(writer) + writer.Reset(&buf) + case 6: + writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel6.Put(writer) + writer.Reset(&buf) + case 7: + writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel7.Put(writer) + writer.Reset(&buf) + case 8: + writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel8.Put(writer) + writer.Reset(&buf) + case 9: + writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel9.Put(writer) + writer.Reset(&buf) + default: writer, err = gzip.NewWriterLevel(&buf, level) if err != nil { return nil, err } - } else { - writer = gzipWriterPool.Get().(*gzip.Writer) - defer gzipWriterPool.Put(writer) - writer.Reset(&buf) } if _, err := writer.Write(data); err != nil { return nil, err diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index e515e0432d8..0ce308f80a2 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -21,6 +21,13 @@ var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) type Config struct { // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client. Admin struct { + Retry struct { + // The total number of times to retry sending (retriable) admin requests (default 5). + // Similar to the `retries` setting of the JVM AdminClientConfig. + Max int + // Backoff time between retries of a failed request (default 100ms) + Backoff time.Duration + } // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations, // including topics, brokers, configurations and ACLs (defaults to 3 seconds). Timeout time.Duration @@ -65,8 +72,15 @@ type Config struct { // (defaults to true). You should only set this to false if you're using // a non-Kafka SASL proxy. Handshake bool - //username and password for SASL/PLAIN or SASL/SCRAM authentication - User string + // AuthIdentity is an (optional) authorization identity (authzid) to + // use for SASL/PLAIN authentication (if different from User) when + // an authenticated user is permitted to act as the presented + // alternative user. See RFC4616 for details. 
+ AuthIdentity string + // User is the authentication identity (authcid) to present for + // SASL/PLAIN or SASL/SCRAM authentication + User string + // Password for SASL/PLAIN authentication Password string // authz id used for SASL/SCRAM authentication SCRAMAuthzID string @@ -82,8 +96,9 @@ type Config struct { GSSAPI GSSAPIConfig } - // KeepAlive specifies the keep-alive period for an active network connection. - // If zero, keep-alives are disabled. (default is 0: disabled). + // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0). + // If zero or positive, keep-alives are enabled. + // If negative, keep-alives are disabled. KeepAlive time.Duration // LocalAddr is the local address to use when dialing an @@ -338,9 +353,21 @@ type Config struct { // offsets. This currently requires the manual use of an OffsetManager // but will eventually be automated. Offsets struct { - // How frequently to commit updated offsets. Defaults to 1s. + // Deprecated: CommitInterval exists for historical compatibility + // and should not be used. Please use Consumer.Offsets.AutoCommit CommitInterval time.Duration + // AutoCommit specifies configuration for commit messages automatically. + AutoCommit struct { + // Whether or not to auto-commit updated offsets back to the broker. + // (default enabled). + Enable bool + + // How frequently to commit updated offsets. Ineffective unless + // auto-commit is enabled (default 1s) + Interval time.Duration + } + // The initial offset to use if no offset was previously committed. // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. Initial int64 @@ -370,6 +397,10 @@ type Config struct { // debugging, and auditing purposes. Defaults to "sarama", but you should // probably set it to something specific to your application. ClientID string + // A rack identifier for this client. This can be any string value which + // indicates where this client is physically located. + // It corresponds with the broker config 'broker.rack' + RackID string // The number of events to buffer in internal and external channels. This // permits the producer and consumer to continue processing some messages // in the background while user code is working, greatly improving throughput. 
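Taken together, the configuration additions in this hunk wire up as follows in client code. This is an illustrative snippet, not upstream code; credentials, rack name, and values are placeholders, and the Admin.Retry lines just restate the defaults:

package kafkaclient

import (
	"time"

	"github.com/Shopify/sarama"
)

func newConfig() *sarama.Config {
	cfg := sarama.NewConfig()

	// New admin retry knobs used by the retryOnError helper (defaults shown).
	cfg.Admin.Retry.Max = 5
	cfg.Admin.Retry.Backoff = 100 * time.Millisecond

	// SASL/PLAIN with the new optional authorization identity (RFC 4616).
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.User = "alice"         // authentication identity (authcid)
	cfg.Net.SASL.Password = "secret"
	cfg.Net.SASL.AuthIdentity = "admin" // act-as identity (authzid), optional

	// Keep-alive semantics changed: zero or positive enables, negative disables.
	cfg.Net.KeepAlive = 0

	// AutoCommit supersedes the deprecated Consumer.Offsets.CommitInterval.
	cfg.Consumer.Offsets.AutoCommit.Enable = true
	cfg.Consumer.Offsets.AutoCommit.Interval = time.Second

	// KIP-392 follower fetching: match the broker's broker.rack (Kafka 2.3+).
	cfg.RackID = "us-east-1a"
	cfg.Version = sarama.V2_3_0_0

	return cfg
}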
@@ -394,6 +425,8 @@ type Config struct { func NewConfig() *Config { c := &Config{} + c.Admin.Retry.Max = 5 + c.Admin.Retry.Backoff = 100 * time.Millisecond c.Admin.Timeout = 3 * time.Second c.Net.MaxOpenRequests = 5 @@ -423,7 +456,8 @@ func NewConfig() *Config { c.Consumer.MaxWaitTime = 250 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond c.Consumer.Return.Errors = false - c.Consumer.Offsets.CommitInterval = 1 * time.Second + c.Consumer.Offsets.AutoCommit.Enable = true + c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second c.Consumer.Offsets.Initial = OffsetNewest c.Consumer.Offsets.Retry.Max = 3 @@ -504,8 +538,6 @@ func (c *Config) Validate() error { return ConfigurationError("Net.ReadTimeout must be > 0") case c.Net.WriteTimeout <= 0: return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") case c.Net.SASL.Enable: if c.Net.SASL.Mechanism == "" { c.Net.SASL.Mechanism = SASLTypePlaintext @@ -621,6 +653,10 @@ func (c *Config) Validate() error { } } + if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) { + return ConfigurationError("zstd compression requires Version >= V2_1_0_0") + } + if c.Producer.Idempotent { if !c.Version.IsAtLeast(V0_11_0_0) { return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0") @@ -650,8 +686,8 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.MaxProcessingTime must be > 0") case c.Consumer.Retry.Backoff < 0: return ConfigurationError("Consumer.Retry.Backoff must be >= 0") - case c.Consumer.Offsets.CommitInterval <= 0: - return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") + case c.Consumer.Offsets.AutoCommit.Interval <= 0: + return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0") case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") case c.Consumer.Offsets.Retry.Max < 0: @@ -660,6 +696,11 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted") } + if c.Consumer.Offsets.CommitInterval != 0 { + Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") + } + // validate IsolationLevel if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") @@ -693,3 +734,16 @@ func (c *Config) Validate() error { return nil } + +func (c *Config) getDialer() proxy.Dialer { + if c.Net.Proxy.Enable { + Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + return c.Net.Proxy.Dialer + } else { + return &net.Dialer{ + Timeout: c.Net.DialTimeout, + KeepAlive: c.Net.KeepAlive, + LocalAddr: c.Net.LocalAddr, + } + } +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go index 5399d75cabf..bef1053aaed 100644 --- a/vendor/github.com/Shopify/sarama/config_resource_type.go +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -1,22 +1,18 @@ package sarama -//ConfigResourceType is a type for config resource +// ConfigResourceType is a type for resources that have configs. 
type ConfigResourceType int8 -// Taken from : -// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes +// Taken from: +// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55 const ( - //UnknownResource constant type - UnknownResource ConfigResourceType = iota - //AnyResource constant type - AnyResource - //TopicResource constant type - TopicResource - //GroupResource constant type - GroupResource - //ClusterResource constant type - ClusterResource - //BrokerResource constant type - BrokerResource + // UnknownResource constant type + UnknownResource ConfigResourceType = 0 + // TopicResource constant type + TopicResource ConfigResourceType = 2 + // BrokerResource constant type + BrokerResource ConfigResourceType = 4 + // BrokerLoggerResource constant type + BrokerLoggerResource ConfigResourceType = 8 ) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index 72c4d7cd8ff..e16d08aa9f1 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -887,6 +887,21 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { request.Version = 4 request.Isolation = bc.consumer.conf.Consumer.IsolationLevel } + if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 7 + // We do not currently implement KIP-227 FetchSessions. Setting the id to 0 + // and the epoch to -1 tells the broker not to generate as session ID we're going + // to just ignore anyway. + request.SessionID = 0 + request.SessionEpoch = -1 + } + if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) { + request.Version = 10 + } + if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) { + request.Version = 11 + request.RackID = bc.consumer.conf.RackID + } for child := range bc.subscriptions { request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go index da99e8811c8..056b9e387fe 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -38,6 +38,9 @@ type ConsumerGroup interface { // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset // commit failures. + // This method should be called inside an infinite loop, when a + // server-side rebalance happens, the consumer session will need to be + // recreated to get the new claims. Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error // Errors returns a read channel of errors that occurred during the consumer life-cycle. 
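The new guidance on Consume above deserves a concrete shape: a session ends on every server-side rebalance, so the call must be re-issued in a loop until the caller shuts down. A minimal sketch (group, topic, and handler are placeholders):

package kafkaclient

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

func consumeLoop(ctx context.Context, group sarama.ConsumerGroup, handler sarama.ConsumerGroupHandler) {
	for {
		// Consume blocks for the lifetime of one session and returns when
		// the group rebalances; call it again to rejoin with fresh claims.
		if err := group.Consume(ctx, []string{"my-topic"}, handler); err != nil {
			log.Printf("consume error: %v", err)
			return
		}
		if ctx.Err() != nil { // caller cancelled: stop rejoining
			return
		}
	}
}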
@@ -120,9 +123,6 @@ func (c *consumerGroup) Close() (err error) { c.closeOnce.Do(func() { close(c.closed) - c.lock.Lock() - defer c.lock.Unlock() - // leave group if e := c.leave(); e != nil { err = e @@ -175,6 +175,7 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co // loop check topic partition numbers changed // will trigger rebalance when any topic partitions number had changed + // avoid spawning more than one loopCheckPartitionNumbers goroutine when Consume is called again go c.loopCheckPartitionNumbers(topics, sess) // Wait for session exit signal @@ -333,20 +334,14 @@ func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrate MemberId: c.memberID, GenerationId: generationID, } + strategy := c.config.Consumer.Group.Rebalance.Strategy for memberID, topics := range plan { assignment := &ConsumerGroupMemberAssignment{Topics: topics} - - // Include topic assignments in group-assignment userdata for each consumer-group member - if c.config.Consumer.Group.Rebalance.Strategy.Name() == StickyBalanceStrategyName { - userDataBytes, err := encode(&StickyAssignorUserDataV1{ - Topics: topics, - Generation: generationID, - }, nil) - if err != nil { - return nil, err - } - assignment.UserData = userDataBytes + userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) + if err != nil { + return nil, err } + assignment.UserData = userDataBytes if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { return nil, err } @@ -384,8 +379,10 @@ func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) return strategy.Plan(members, topics) } -// Leaves the cluster, called by Close, protected by lock. +// Leaves the cluster, called by Close. func (c *consumerGroup) leave() error { + c.lock.Lock() + defer c.lock.Unlock() if c.memberID == "" { return nil } @@ -417,12 +414,6 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { - select { - case <-c.closed: - return - default: - } - if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { err = &ConsumerError{ Topic: topic, @@ -431,18 +422,27 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { } } - if c.config.Consumer.Return.Errors { - select { - case c.errors <- err: - default: - } - } else { + if !c.config.Consumer.Return.Errors { Logger.Println(err) + return + } + + select { + case <-c.closed: + // consumer is closed + return + default: + } + + select { + case c.errors <- err: + default: + // no error listener } } func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { - pause := time.NewTicker(c.config.Consumer.Group.Heartbeat.Interval * 2) + pause := time.NewTicker(c.config.Metadata.RefreshFrequency) defer session.cancel() defer pause.Stop() var oldTopicToPartitionNum map[string]int @@ -462,6 +462,10 @@ func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *cons } select { case <-pause.C: + case <-session.ctx.Done(): + Logger.Printf("loop check partition number coroutine will exit, topics %s", topics) + // if the session was closed elsewhere, this loop should exit + return case <-c.closed: return } @@ -469,10 +473,6 @@ func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) { - if err := c.client.RefreshMetadata(topics...); err != nil { -
Logger.Printf("Consumer Group refresh metadata failed %v", err) - return nil, err - } topicToPartitionNum := make(map[string]int, len(topics)) for _, topic := range topics { if partitionNum, err := c.client.Partitions(topic); err != nil { @@ -760,7 +760,7 @@ func (s *consumerGroupSession) heartbeatLoop() { case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: return default: - s.parent.handleError(err, "", -1) + s.parent.handleError(resp.Err, "", -1) return } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go index a8dcaefe8fc..e5ebdaef5ba 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -29,6 +29,10 @@ func (r *ConsumerMetadataRequest) version() int16 { return 0 } +func (r *ConsumerMetadataRequest) headerVersion() int16 { + return 1 +} + func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go index f39a8711cbb..1b5d00d2203 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -73,6 +73,10 @@ func (r *ConsumerMetadataResponse) version() int16 { return 0 } +func (r *ConsumerMetadataResponse) headerVersion() int16 { + return 0 +} + func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go index af321e99466..46fb0440249 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_request.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -67,6 +67,10 @@ func (r *CreatePartitionsRequest) version() int16 { return 0 } +func (r *CreatePartitionsRequest) headerVersion() int16 { + return 1 +} + func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go index bb18204a7c2..12ce78857bc 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -63,6 +63,10 @@ func (r *CreatePartitionsResponse) version() int16 { return 0 } +func (r *CreatePartitionsResponse) headerVersion() int16 { + return 0 +} + func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go index 709c0a44e71..287acd069b6 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_request.go +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -79,6 +79,10 @@ func (c *CreateTopicsRequest) version() int16 { return c.Version } +func (r *CreateTopicsRequest) headerVersion() int16 { + return 1 +} + func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { switch c.Version { case 2: diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go index a493e02accf..7e1448a6692 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ 
b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -70,6 +70,10 @@ func (c *CreateTopicsResponse) version() int16 { return c.Version } +func (c *CreateTopicsResponse) headerVersion() int16 { + return 0 +} + func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { switch c.Version { case 2: diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go index eaccbfc268e..e4dc3c185a6 100644 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -7,7 +7,7 @@ import ( "io/ioutil" "sync" - "github.com/eapache/go-xerial-snappy" + snappy "github.com/eapache/go-xerial-snappy" "github.com/pierrec/lz4" ) diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go index 305a324ac2d..4ac8bbee4cb 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_request.go +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -21,6 +21,10 @@ func (r *DeleteGroupsRequest) version() int16 { return 0 } +func (r *DeleteGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { return V1_1_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go index c067ebb42b0..5e7b1ed3681 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_response.go +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -65,6 +65,10 @@ func (r *DeleteGroupsResponse) version() int16 { return 0 } +func (r *DeleteGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { return V1_1_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go index 93efafd4d0b..dc106b17d62 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_request.go +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -77,6 +77,10 @@ func (d *DeleteRecordsRequest) version() int16 { return 0 } +func (d *DeleteRecordsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go index 733a58b6bc3..d530b4c7e91 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_response.go +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -80,6 +80,10 @@ func (d *DeleteRecordsResponse) version() int16 { return 0 } +func (d *DeleteRecordsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go index 911f67d31ba..ba6780a8e39 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_request.go +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -38,6 +38,10 @@ func (d *DeleteTopicsRequest) version() int16 { return d.Version } +func (d *DeleteTopicsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go index 
34225460a31..733961a89a0 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_response.go +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -68,6 +68,10 @@ func (d *DeleteTopicsResponse) version() int16 { return d.Version } +func (d *DeleteTopicsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go index ccb587b35cf..d0c73528081 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_request.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -100,6 +100,10 @@ func (r *DescribeConfigsRequest) version() int16 { return r.Version } +func (r *DescribeConfigsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go index 5737232255a..063ae911259 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -112,6 +112,10 @@ func (r *DescribeConfigsResponse) version() int16 { return r.Version } +func (r *DescribeConfigsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: @@ -249,12 +253,16 @@ func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { return err } r.Default = defaultB + if defaultB { + r.Source = SourceDefault + } } else { source, err := pd.getInt8() if err != nil { return err } r.Source = ConfigSource(source) + r.Default = r.Source == SourceDefault } sensitive, err := pd.getBool() @@ -277,7 +285,6 @@ func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { } r.Synonyms[i] = s } - } return nil } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go index 1fb35677708..f8962da58fc 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 { return 0 } +func (r *DescribeGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go index 542b3a97170..bc242e4217d 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -43,6 +43,10 @@ func (r *DescribeGroupsResponse) version() int16 { return 0 } +func (r *DescribeGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go index cb1e7815262..c0bf04e04e2 100644 --- a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go @@ -78,6 +78,10 @@ func (r *DescribeLogDirsRequest) version() int16 { return r.Version } +func (r 
*DescribeLogDirsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go index d207312efbf..411da38ad20 100644 --- a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go @@ -61,6 +61,10 @@ func (r *DescribeLogDirsResponse) version() int16 { return r.Version } +func (r *DescribeLogDirsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } @@ -80,6 +84,9 @@ func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { return err } + if err := pe.putArrayLength(len(r.Topics)); err != nil { + return err + } for _, topic := range r.Topics { if err := topic.encode(pe); err != nil { return err @@ -133,6 +140,9 @@ func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { return err } + if err := pe.putArrayLength(len(r.Partitions)); err != nil { + return err + } for _, partition := range r.Partitions { if err := partition.encode(pe); err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml index 481f68144bb..57b2d3ca86e 100644 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -2,7 +2,7 @@ name: sarama up: - go: - version: '1.13.1' + version: '1.14' commands: test: diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go index 7ce3bc0f6e2..025bad61f06 100644 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -12,6 +12,11 @@ type encoder interface { encode(pe packetEncoder) error } +type encoderWithHeader interface { + encoder + headerVersion() int16 +} + // Encode takes an Encoder and turns it into bytes while potentially recording metrics. 
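The new encoderWithHeader interface above is what lets the mock broker, later in this patch, pair each response with the right response-header layout. As a rough sketch of the convention only (the helper name below is hypothetical; the real logic lives in MockBroker.encodeHeader further down):

func responseHeaderLength(res encoderWithHeader) int {
	if res.headerVersion() >= 1 {
		return 9 // int32 length + int32 correlation ID + empty tagged-field varint
	}
	return 8 // int32 length + int32 correlation ID
}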
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go index 2cd9b506d3f..6635425ddd6 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_request.go +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -45,6 +45,10 @@ func (a *EndTxnRequest) version() int16 { return 0 } +func (r *EndTxnRequest) headerVersion() int16 { + return 1 +} + func (a *EndTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go index 33b27e33d49..763976726cc 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_response.go +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -39,6 +39,10 @@ func (e *EndTxnResponse) version() int16 { return 0 } +func (r *EndTxnResponse) headerVersion() int16 { + return 0 +} + func (e *EndTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index 97be3c0f156..ca621b09268 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -94,6 +94,14 @@ func (mErr MultiError) Error() string { return errString } +func (mErr MultiError) PrettyError() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "\n" + } + return errString +} + // ErrDeleteRecords is the type of error returned when fail to delete the required records type ErrDeleteRecords struct { MultiError @@ -103,6 +111,14 @@ func (err ErrDeleteRecords) Error() string { return "kafka server: failed to delete records " + err.MultiError.Error() } +type ErrReassignPartitions struct { + MultiError +} + +func (err ErrReassignPartitions) Error() string { + return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError()) +} + // Numeric error codes returned by the Kafka server. 
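For illustration, a small sketch (not from the patch) of how the new ErrReassignPartitions and PrettyError compose; the errs slice is hypothetical, and MultiError stores a *[]error as shown above:

errs := []error{
	fmt.Errorf("partition 0: invalid replica assignment"),
	fmt.Errorf("partition 1: broker not available"),
}
err := ErrReassignPartitions{MultiError: MultiError{Errors: &errs}}
// Error() delegates to PrettyError(), yielding one wrapped error per line:
//   failed to reassign partitions for topic:
//   partition 0: invalid replica assignment
//   partition 1: broker not available
fmt.Println(err)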
const ( ErrNoError KError = 0 diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index 4db9ddd3d7a..f893aeff7d5 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -1,20 +1,41 @@ package sarama type fetchRequestBlock struct { - fetchOffset int64 - maxBytes int32 + Version int16 + currentLeaderEpoch int32 + fetchOffset int64 + logStartOffset int64 + maxBytes int32 } -func (b *fetchRequestBlock) encode(pe packetEncoder) error { +func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { + b.Version = version + if b.Version >= 9 { + pe.putInt32(b.currentLeaderEpoch) + } pe.putInt64(b.fetchOffset) + if b.Version >= 5 { + pe.putInt64(b.logStartOffset) + } pe.putInt32(b.maxBytes) return nil } -func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { +func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) { + b.Version = version + if b.Version >= 9 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } if b.fetchOffset, err = pd.getInt64(); err != nil { return err } + if b.Version >= 5 { + if b.logStartOffset, err = pd.getInt64(); err != nil { + return err + } + } if b.maxBytes, err = pd.getInt32(); err != nil { return err } @@ -25,12 +46,16 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - MaxBytes int32 - Version int16 - Isolation IsolationLevel - blocks map[string]map[int32]*fetchRequestBlock + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + SessionID int32 + SessionEpoch int32 + blocks map[string]map[int32]*fetchRequestBlock + forgotten map[string][]int32 + RackID string } type IsolationLevel int8 @@ -50,6 +75,10 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { if r.Version >= 4 { pe.putInt8(int8(r.Isolation)) } + if r.Version >= 7 { + pe.putInt32(r.SessionID) + pe.putInt32(r.SessionEpoch) + } err = pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -65,17 +94,44 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { } for partition, block := range blocks { pe.putInt32(partition) - err = block.encode(pe) + err = block.encode(pe, r.Version) if err != nil { return err } } } + if r.Version >= 7 { + err = pe.putArrayLength(len(r.forgotten)) + if err != nil { + return err + } + for topic, partitions := range r.forgotten { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for _, partition := range partitions { + pe.putInt32(partition) + } + } + } + if r.Version >= 11 { + err = pe.putString(r.RackID) + if err != nil { + return err + } + } + return nil } func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version + if _, err = pd.getInt32(); err != nil { return err } @@ -97,6 +153,16 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { } r.Isolation = IsolationLevel(isolation) } + if r.Version >= 7 { + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + r.SessionEpoch, err = pd.getInt32() + if err != nil { + return err + } + } 
topicCount, err := pd.getArrayLength() if err != nil { return err @@ -121,12 +187,47 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { return err } fetchBlock := &fetchRequestBlock{} - if err = fetchBlock.decode(pd); err != nil { + if err = fetchBlock.decode(pd, r.Version); err != nil { return err } r.blocks[topic][partition] = fetchBlock } } + + if r.Version >= 7 { + forgottenCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten = make(map[string][]int32) + for i := 0; i < forgottenCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten[topic] = make([]int32, partitionCount) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.forgotten[topic][j] = partition + } + } + } + + if r.Version >= 11 { + r.RackID, err = pd.getString() + if err != nil { + return err + } + } + return nil } @@ -138,18 +239,34 @@ func (r *FetchRequest) version() int16 { return r.Version } +func (r *FetchRequest) headerVersion() int16 { + return 1 +} + func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { + case 0: + return MinVersion case 1: return V0_9_0_0 case 2: return V0_10_0_0 case 3: return V0_10_1_0 - case 4: + case 4, 5: return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 default: - return MinVersion + return MaxVersion } } @@ -158,13 +275,21 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int r.blocks = make(map[string]map[int32]*fetchRequestBlock) } + if r.Version >= 7 && r.forgotten == nil { + r.forgotten = make(map[string][]int32) + } + if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*fetchRequestBlock) } tmp := new(fetchRequestBlock) + tmp.Version = r.Version tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset + if r.Version >= 9 { + tmp.currentLeaderEpoch = int32(-1) + } r.blocks[topic][partitionID] = tmp } diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index 3afc1877895..ca6d78832cf 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -30,13 +30,15 @@ func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { } type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - LastStableOffset int64 - AbortedTransactions []*AbortedTransaction - Records *Records // deprecated: use FetchResponseBlock.RecordsSet - RecordsSet []*Records - Partial bool + Err KError + HighWaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions []*AbortedTransaction + PreferredReadReplica int32 + Records *Records // deprecated: use FetchResponseBlock.RecordsSet + RecordsSet []*Records + Partial bool } func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -57,6 +59,13 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) return err } + if version >= 5 { + b.LogStartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + numTransact, err := pd.getArrayLength() if err != nil { return err @@ -75,6 +84,13 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) } } + if version >= 11 { + b.PreferredReadReplica, err = pd.getInt32() 
+ if err != nil { + return err + } + } + recordsSize, err := pd.getInt32() if err != nil { return err @@ -166,6 +182,10 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) if version >= 4 { pe.putInt64(b.LastStableOffset) + if version >= 5 { + pe.putInt64(b.LogStartOffset) + } + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { return err } @@ -176,6 +196,10 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) } } + if version >= 11 { + pe.putInt32(b.PreferredReadReplica) + } + pe.push(&lengthField{}) for _, records := range b.RecordsSet { err = records.encode(pe) @@ -200,7 +224,9 @@ func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { type FetchResponse struct { Blocks map[string]map[int32]*FetchResponseBlock ThrottleTime time.Duration - Version int16 // v1 requires 0.9+, v2 requires 0.10+ + ErrorCode int16 + SessionID int32 + Version int16 LogAppendTime bool Timestamp time.Time } @@ -216,6 +242,17 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { r.ThrottleTime = time.Duration(throttle) * time.Millisecond } + if r.Version >= 7 { + r.ErrorCode, err = pd.getInt16() + if err != nil { + return err + } + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + } + numTopics, err := pd.getArrayLength() if err != nil { return err @@ -258,6 +295,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } + if r.Version >= 7 { + pe.putInt16(r.ErrorCode) + pe.putInt32(r.SessionID) + } + err = pe.putArrayLength(len(r.Blocks)) if err != nil { return err @@ -281,7 +323,6 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { return err } } - } return nil } @@ -294,18 +335,34 @@ func (r *FetchResponse) version() int16 { return r.Version } +func (r *FetchResponse) headerVersion() int16 { + return 0 +} + func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { + case 0: + return MinVersion case 1: return V0_9_0_0 case 2: return V0_10_0_0 case 3: return V0_10_1_0 - case 4: + case 4, 5: return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 default: - return MinVersion + return MaxVersion } } diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go index ff2ad206c42..597bcbf786f 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -51,6 +51,10 @@ func (f *FindCoordinatorRequest) version() int16 { return f.Version } +func (r *FindCoordinatorRequest) headerVersion() int16 { + return 1 +} + func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { switch f.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go index 9c900e8b774..83a648ad4ae 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go +++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -82,6 +82,10 @@ func (f *FindCoordinatorResponse) version() int16 { return f.Version } +func (r *FindCoordinatorResponse) headerVersion() int16 { + return 0 +} + func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { switch f.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/go.mod 
b/vendor/github.com/Shopify/sarama/go.mod index 4337c009a12..d3a93763a58 100644 --- a/vendor/github.com/Shopify/sarama/go.mod +++ b/vendor/github.com/Shopify/sarama/go.mod @@ -5,25 +5,30 @@ go 1.13 require ( github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/davecgh/go-spew v1.1.1 - github.com/eapache/go-resiliency v1.1.0 + github.com/eapache/go-resiliency v1.2.0 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 github.com/eapache/queue v1.1.0 github.com/fortytw2/leaktest v1.3.0 - github.com/frankban/quicktest v1.4.1 // indirect + github.com/frankban/quicktest v1.7.2 // indirect github.com/golang/snappy v0.0.1 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect - github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 // indirect - github.com/klauspost/compress v1.8.2 - github.com/pierrec/lz4 v2.2.6+incompatible - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a - github.com/stretchr/testify v1.3.0 + github.com/google/go-cmp v0.4.0 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/jcmturner/gofork v1.0.0 + github.com/klauspost/compress v1.9.8 + github.com/kr/pretty v0.2.0 // indirect + github.com/pierrec/lz4 v2.4.1+incompatible + github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 + github.com/stretchr/testify v1.4.0 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/xdg/stringprep v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect - golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 + golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 // indirect + golang.org/x/net v0.0.0-20200202094626-16171245cfb2 + golang.org/x/text v0.3.2 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect - gopkg.in/jcmturner/gokrb5.v7 v7.2.3 + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect ) diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum index d2f04eedc25..06ec3280c2d 100644 --- a/vendor/github.com/Shopify/sarama/go.sum +++ b/vendor/github.com/Shopify/sarama/go.sum @@ -1,67 +1,81 @@ -github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo= -github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod 
h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= -github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI= -github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= +github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= -github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go index 57f3ecbb2d0..d585f1a2bb3 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go @@ -1,13 +1,13 @@ package sarama import ( - "encoding/asn1" "encoding/binary" "fmt" "io" "strings" "time" + "github.com/jcmturner/gofork/encoding/asn1" "gopkg.in/jcmturner/gokrb5.v7/asn1tools" "gopkg.in/jcmturner/gokrb5.v7/gssapi" "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" @@ -200,7 +200,6 @@ func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient K /* This does the handshake for authorization */ func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { - kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config) if err != nil { Logger.Printf("Kerberos client error: %s", err) @@ -243,7 +242,7 @@ func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { } broker.updateOutgoingCommunicationMetrics(bytesWritten) if krbAuth.step == GSS_API_VERIFY { - var bytesRead = 0 + bytesRead := 0 receivedBytes, bytesRead, err = krbAuth.readPackage(broker) requestLatency := time.Since(requestTime) broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency) diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go index ce49c473972..e9d9af19110 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -42,6 +42,10 @@ func (r *HeartbeatRequest) version() int16 { return 0 } +func (r *HeartbeatRequest) headerVersion() int16 { + return 1 +} + func (r *HeartbeatRequest) requiredVersion() 
KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go index 766f5fdec6f..577ab72e574 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -27,6 +27,10 @@ func (r *HeartbeatResponse) version() int16 { return 0 } +func (r *HeartbeatResponse) headerVersion() int16 { + return 0 +} + func (r *HeartbeatResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go index 8ceb6c23255..689444397d6 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -38,6 +38,10 @@ func (i *InitProducerIDRequest) version() int16 { return 0 } +func (i *InitProducerIDRequest) headerVersion() int16 { + return 1 +} + func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go index 1b32eb085b2..3e1242bf622 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -50,6 +50,10 @@ func (i *InitProducerIDResponse) version() int16 { return 0 } +func (i *InitProducerIDResponse) headerVersion() int16 { + return 0 +} + func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go index 97e9299ea1a..3734e82e406 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -134,6 +134,10 @@ func (r *JoinGroupRequest) version() int16 { return r.Version } +func (r *JoinGroupRequest) headerVersion() int16 { + return 1 +} + func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch r.Version { case 2: diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go index 5752acc8aeb..54b0a45c28e 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -123,6 +123,10 @@ func (r *JoinGroupResponse) version() int16 { return r.Version } +func (r *JoinGroupResponse) headerVersion() int16 { + return 0 +} + func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { case 2: diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go index e177427482f..d7789b68dbe 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -35,6 +35,10 @@ func (r *LeaveGroupRequest) version() int16 { return 0 } +func (r *LeaveGroupRequest) headerVersion() int16 { + return 1 +} + func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go index d60c626da01..25f8d5eb36b 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -27,6 +27,10 @@ func (r *LeaveGroupResponse) 
version() int16 { return 0 } +func (r *LeaveGroupResponse) headerVersion() int16 { + return 0 +} + func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go index 3b16abf7fa8..ed44cc27e36 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -19,6 +19,10 @@ func (r *ListGroupsRequest) version() int16 { return 0 } +func (r *ListGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *ListGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go index 56115d4c75a..777bae7e63e 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -64,6 +64,10 @@ func (r *ListGroupsResponse) version() int16 { return 0 } +func (r *ListGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *ListGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go new file mode 100644 index 00000000000..c1ffa9ba02b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go @@ -0,0 +1,98 @@ +package sarama + +type ListPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string][]int32 + Version int16 +} + +func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.blocks[topic][j] = partition + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *ListPartitionReassignmentsRequest) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { + if r.blocks == nil { + r.blocks = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + 
r.blocks[topic] = partitionIDs + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go new file mode 100644 index 00000000000..4baa6a08e83 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go @@ -0,0 +1,169 @@ +package sarama + +type PartitionReplicaReassignmentsStatus struct { + Replicas []int32 + AddingReplicas []int32 + RemovingReplicas []int32 +} + +func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { + if err := pe.putCompactInt32Array(b.Replicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { + if b.Replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return err +} + +type ListPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus +} + +func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { + if r.TopicStatus == nil { + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) + } + partitions := r.TopicStatus[topic] + if partitions == nil { + partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) + r.TopicStatus[topic] = partitions + } + + partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} +} + +func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.TopicStatus)) + for topic, partitions := range r.TopicStatus { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil 
{ + return err + } + + r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + + block := &PartitionReplicaReassignmentsStatus{} + if err := block.decode(pd); err != nil { + return err + } + r.TopicStatus[topic][partition] = block + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ListPartitionReassignmentsResponse) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index 7c54748c2b3..e48566b37cd 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -85,7 +85,6 @@ func (m *Message) encode(pe packetEncoder) error { payload = m.compressedCache m.compressedCache = nil } else if m.Value != nil { - payload, err = compress(m.Codec, m.CompressionLevel, m.Value) if err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index 1b590d368f2..e835f5a9c8a 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -65,6 +65,10 @@ func (r *MetadataRequest) version() int16 { return r.Version } +func (r *MetadataRequest) headerVersion() int16 { + return 1 +} + func (r *MetadataRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index b2d532e41f6..0bb8702cc37 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -255,6 +255,10 @@ func (r *MetadataResponse) version() int16 { return r.Version } +func (r *MetadataResponse) headerVersion() int16 { + return 0 +} + func (r *MetadataResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: @@ -318,5 +322,4 @@ foundPartition: pmatch.Isr = isr pmatch.OfflineReplicas = offline pmatch.Err = err - } diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index 4ed46a61aa4..ff5a68ae7fe 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -20,7 +20,7 @@ const ( type GSSApiHandlerFunc func([]byte) []byte -type requestHandlerFunc func(req *request) (res encoder) +type requestHandlerFunc func(req *request) (res encoderWithHeader) // RequestNotifierFunc is invoked when a mock broker processes a request successfully // and will provides the number of bytes read and written. @@ -55,7 +55,7 @@ type MockBroker struct { port int32 closing chan none stopper chan none - expectations chan encoder + expectations chan encoderWithHeader listener net.Listener t TestReporter latency time.Duration @@ -83,7 +83,7 @@ func (b *MockBroker) SetLatency(latency time.Duration) { // and uses the found MockResponse instance to generate an appropriate reply. 
// If the request type is not found in the map then nothing is sent. func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { - b.setHandler(func(req *request) (res encoder) { + b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() mockResponse := handlerMap[reqTypeName] if mockResponse == nil { @@ -213,7 +213,7 @@ func (b *MockBroker) isGSSAPI(buffer []byte) bool { return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04}) } -func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { +func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) { defer wg.Done() defer func() { _ = conn.Close() @@ -231,11 +231,9 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) } }() - resHeader := make([]byte, 8) var bytesWritten int var bytesRead int for { - buffer, err := b.readToBytes(conn) if err != nil { Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) @@ -245,7 +243,6 @@ bytesWritten = 0 if !b.isGSSAPI(buffer) { - req, br, err := decodeRequest(bytes.NewReader(buffer)) bytesRead = br if err != nil { @@ -283,8 +280,7 @@ continue } - binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) - binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes))) if _, err = conn.Write(resHeader); err != nil { b.serverError(err) break @@ -294,7 +290,6 @@ break } bytesWritten = len(resHeader) + len(encodedRes) - } else { // GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes. // Request history is not kept for this kind of request, as it is only used to test the GSSAPI authentication mechanism
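The header-version logic above is the crux of this change: with KIP-482 "flexible" versions (Kafka 2.4+), response headers of version 1 carry a tagged-fields section after the correlation ID, so the mock broker can no longer write a fixed 8-byte header. A minimal standalone sketch of the framing that the `encodeHeader` helper in this patch performs; the function name here is illustrative, not part of the diff:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeResponseHeader frames a Kafka response header: a 4-byte length
// covering everything after the length field itself, the 4-byte correlation
// ID, and, for header version 1+, an empty tagged-field section (uvarint 0).
func encodeResponseHeader(headerVersion int16, correlationID int32, payloadLength uint32) []byte {
	headerLength := uint32(8) // length field + correlation ID
	if headerVersion >= 1 {
		headerLength = 9 // plus one byte for the empty tagged-field uvarint
	}

	header := make([]byte, headerLength)
	binary.BigEndian.PutUint32(header, payloadLength+headerLength-4)
	binary.BigEndian.PutUint32(header[4:], uint32(correlationID))
	if headerVersion >= 1 {
		binary.PutUvarint(header[8:], 0) // no tagged fields
	}
	return header
}

func main() {
	// v1 header for a 10-byte payload, correlation ID 42:
	// length = 10 + 9 - 4 = 15 (0x0f), then 0x2a, then the empty tag buffer.
	fmt.Printf("% x\n", encodeResponseHeader(1, 42, 10))
}
```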
@@ -317,12 +312,29 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) b.notifier(bytesRead, bytesWritten) } b.lock.Unlock() - } Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) } -func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { +func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte { + headerLength := uint32(8) + + if headerVersion >= 1 { + headerLength = 9 + } + + resHeader := make([]byte, headerLength) + binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4) + binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId)) + + if headerVersion >= 1 { + binary.PutUvarint(resHeader[8:], 0) + } + + return resHeader +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) { select { case res, ok := <-b.expectations: if !ok { @@ -377,7 +389,7 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener stopper: make(chan none), t: t, brokerID: brokerID, - expectations: make(chan encoder, 512), + expectations: make(chan encoderWithHeader, 512), listener: listener, } broker.handler = broker.defaultRequestHandler @@ -398,6 +410,6 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener return broker } -func (b *MockBroker) Returns(e encoder) { +func (b *MockBroker) Returns(e encoderWithHeader) { b.expectations <- e } diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go index affeb2db45b..d36649d8ace 100644 --- a/vendor/github.com/Shopify/sarama/mockkerberos.go +++ b/vendor/github.com/Shopify/sarama/mockkerberos.go @@ -3,6 +3,7 @@ package sarama import ( "encoding/binary" "encoding/hex" + "gopkg.in/jcmturner/gokrb5.v7/credentials" "gopkg.in/jcmturner/gokrb5.v7/gssapi" "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" @@ -55,7 +56,6 @@ func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { } type MockKerberosClient struct { - asReqBytes string asRepBytes string ASRep messages.ASRep credentials *credentials.Credentials diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index 7dcc93e3600..2560da8af3e 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -18,20 +18,20 @@ type TestReporter interface { // allows generating a response based on a request body. MockResponses are used // to program behavior of MockBroker in tests. type MockResponse interface { - For(reqBody versionedDecoder) (res encoder) + For(reqBody versionedDecoder) (res encoderWithHeader) } // MockWrapper is a mock response builder that returns a particular concrete // response regardless of the actual request passed to the `For` method.
type MockWrapper struct { - res encoder + res encoderWithHeader } -func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { return mw.res } -func NewMockWrapper(res encoder) *MockWrapper { +func NewMockWrapper(res encoderWithHeader) *MockWrapper { return &MockWrapper{res: res} } @@ -50,7 +50,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { switch res := res.(type) { case MockResponse: ms.responses[i] = res - case encoder: + case encoderWithHeader: ms.responses[i] = NewMockWrapper(res) default: panic(fmt.Sprintf("Unexpected response type: %T", res)) @@ -59,7 +59,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { return ms } -func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { res = mc.responses[0].For(reqBody) if len(mc.responses) > 1 { mc.responses = mc.responses[1:] @@ -79,7 +79,7 @@ func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { } } -func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder { +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*ListGroupsRequest) _ = request response := &ListGroupsResponse{ @@ -110,7 +110,7 @@ func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, descrip return m } -func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder { +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*DescribeGroupsRequest) response := &DescribeGroupsResponse{} @@ -166,7 +166,7 @@ func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResp return mmr } -func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { metadataRequest := reqBody.(*MetadataRequest) metadataResponse := &MetadataResponse{ Version: metadataRequest.version(), @@ -233,7 +233,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of return mor } -func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) offsetResponse := &OffsetResponse{Version: mor.version} for topic, partitions := range offsetRequest.blocks { @@ -309,7 +309,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of return mfr } -func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) res := &FetchResponse{ Version: mfr.version, @@ -393,7 +393,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M return mr } -func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup res := &ConsumerMetadataResponse{} @@ -442,7 +442,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, return mr } -func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := 
reqBody.(*FindCoordinatorRequest) res := &FindCoordinatorResponse{} var v interface{} @@ -489,7 +489,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 return mr } -func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup res := &OffsetCommitResponse{} @@ -546,7 +546,7 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE return mr } -func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) res := &ProduceResponse{ Version: mr.version, @@ -605,7 +605,7 @@ func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchRespo return mr } -func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetFetchRequest) group := req.ConsumerGroup res := &OffsetFetchResponse{Version: req.Version} @@ -630,7 +630,7 @@ func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { return &MockCreateTopicsResponse{t: t} } -func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateTopicsRequest) res := &CreateTopicsResponse{ Version: req.Version, @@ -659,7 +659,7 @@ func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { return &MockDeleteTopicsResponse{t: t} } -func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteTopicsRequest) res := &DeleteTopicsResponse{} res.TopicErrorCodes = make(map[string]KError) @@ -667,7 +667,7 @@ func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder { for _, topic := range req.Topics { res.TopicErrorCodes[topic] = ErrNoError } - res.Version = int16(req.Version) + res.Version = req.Version return res } @@ -679,7 +679,7 @@ func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsRespon return &MockCreatePartitionsResponse{t: t} } -func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreatePartitionsRequest) res := &CreatePartitionsResponse{} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) @@ -698,6 +698,43 @@ func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { return res } +type MockAlterPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { + return &MockAlterPartitionReassignmentsResponse{t: t} +} + +func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterPartitionReassignmentsRequest) + _ = req + res := &AlterPartitionReassignmentsResponse{} + return res +} + +type MockListPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { + return &MockListPartitionReassignmentsResponse{t: t} +} + +func 
(mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ListPartitionReassignmentsRequest) + _ = req + res := &ListPartitionReassignmentsResponse{} + + for topic, partitions := range req.blocks { + for _, partition := range partitions { + res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) + } + } + + return res +} + type MockDeleteRecordsResponse struct { t TestReporter } @@ -706,7 +743,7 @@ func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { return &MockDeleteRecordsResponse{t: t} } -func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteRecordsRequest) res := &DeleteRecordsResponse{} res.Topics = make(map[string]*DeleteRecordsResponseTopic) @@ -729,31 +766,84 @@ func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse return &MockDescribeConfigsResponse{t: t} } -func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeConfigsRequest) - res := &DescribeConfigsResponse{} + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + includeSynonyms := (req.Version > 0) + includeSource := (req.Version > 0) for _, r := range req.Resources { var configEntries []*ConfigEntry switch r.Type { - case TopicResource: + case BrokerResource: configEntries = append(configEntries, - &ConfigEntry{Name: "max.message.bytes", - Value: "1000000", - ReadOnly: false, - Default: true, - Sensitive: false, - }, &ConfigEntry{Name: "retention.ms", - Value: "5000", - ReadOnly: false, - Default: false, - Sensitive: false, - }, &ConfigEntry{Name: "password", - Value: "12345", - ReadOnly: false, - Default: false, - Sensitive: true, - }) + &ConfigEntry{ + Name: "min.insync.replicas", + Value: "2", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case BrokerLoggerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "kafka.controller.KafkaController", + Value: "DEBUG", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case TopicResource: + maxMessageBytes := &ConfigEntry{Name: "max.message.bytes", + Value: "1000000", + ReadOnly: false, + Default: !includeSource, + Sensitive: false, + } + if includeSource { + maxMessageBytes.Source = SourceDefault + } + if includeSynonyms { + maxMessageBytes.Synonyms = []*ConfigSynonym{ + { + ConfigName: "max.message.bytes", + ConfigValue: "500000", + }, + } + } + retentionMs := &ConfigEntry{Name: "retention.ms", + Value: "5000", + ReadOnly: false, + Default: false, + Sensitive: false, + } + if includeSynonyms { + retentionMs.Synonyms = []*ConfigSynonym{ + { + ConfigName: "log.retention.ms", + ConfigValue: "2500", + }, + } + } + password := &ConfigEntry{Name: "password", + Value: "12345", + ReadOnly: false, + Default: false, + Sensitive: true, + } + configEntries = append( + configEntries, maxMessageBytes, retentionMs, password) res.Resources = append(res.Resources, &ResourceResponse{ Name: r.Name, Configs: configEntries, @@ -763,6 +853,31 @@ func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder { return res } +type 
MockDescribeConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { + return &MockDescribeConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockAlterConfigsResponse struct { t TestReporter } @@ -771,19 +886,42 @@ func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { return &MockAlterConfigsResponse{t: t} } -func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) res := &AlterConfigsResponse{} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name, - Type: TopicResource, + Type: r.Type, ErrorMsg: "", }) } return res } +type MockAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { + return &MockAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockCreateAclsResponse struct { t TestReporter } @@ -792,7 +930,7 @@ func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { return &MockCreateAclsResponse{t: t} } -func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) res := &CreateAclsResponse{} @@ -810,7 +948,7 @@ func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { return &MockListAclsResponse{t: t} } -func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeAclsRequest) res := &DescribeAclsResponse{} res.Err = ErrNoError @@ -852,7 +990,7 @@ func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateRespon return &MockSaslAuthenticateResponse{t: t} } -func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder { +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { res := &SaslAuthenticateResponse{} res.Err = msar.kerror res.SaslAuthBytes = msar.saslAuthBytes @@ -883,7 +1021,7 @@ func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { return &MockSaslHandshakeResponse{t: t} } -func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder { +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { res := &SaslHandshakeResponse{} res.Err = mshr.kerror res.EnabledMechanisms = mshr.enabledMechanisms @@ -904,7 +1042,7 @@ func NewMockDeleteAclsResponse(t TestReporter) 
*MockDeleteAclsResponse { return &MockDeleteAclsResponse{t: t} } -func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteAclsRequest) res := &DeleteAclsResponse{} @@ -930,7 +1068,7 @@ func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDelete return m } -func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder { +func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { resp := &DeleteGroupsResponse{ GroupErrorCodes: map[string]KError{}, } @@ -939,3 +1077,45 @@ func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder { } return resp } + +type MockDescribeLogDirsResponse struct { + t TestReporter + logDirs []DescribeLogDirsResponseDirMetadata +} + +func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse { + return &MockDescribeLogDirsResponse{t: t} +} + +func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse { + topics := []DescribeLogDirsResponseTopic{} + for topic := range topicPartitions { + partitions := []DescribeLogDirsResponsePartition{} + for i := 0; i < topicPartitions[topic]; i++ { + partitions = append(partitions, DescribeLogDirsResponsePartition{ + PartitionID: int32(i), + IsTemporary: false, + OffsetLag: int64(0), + Size: int64(1234), + }) + } + topics = append(topics, DescribeLogDirsResponseTopic{ + Topic: topic, + Partitions: partitions, + }) + } + logDir := DescribeLogDirsResponseDirMetadata{ + ErrorCode: ErrNoError, + Path: logDirPath, + Topics: topics, + } + m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir} + return m +} + +func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DescribeLogDirsResponse{ + LogDirs: m.logDirs, + } + return resp +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go index 5732ed95c53..9931cade512 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -170,6 +170,10 @@ func (r *OffsetCommitRequest) version() int16 { return r.Version } +func (r *OffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go index e842298dbb6..342260ef599 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -94,6 +94,10 @@ func (r *OffsetCommitResponse) version() int16 { return r.Version } +func (r *OffsetCommitResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go index 68608241ff6..51e9faa3f73 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -68,6 +68,10 @@ func (r *OffsetFetchRequest) version() int16 { return r.Version } +func (r *OffsetFetchRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { 
switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go index 9e257028040..9c64e0708d1 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -155,6 +155,10 @@ func (r *OffsetFetchResponse) version() int16 { return r.Version } +func (r *OffsetFetchResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 923972f2690..19408729faa 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -58,7 +58,7 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client client: client, conf: conf, group: group, - ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval), + ticker: time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval), poms: make(map[string]map[int32]*partitionOffsetManager), memberID: memberID, @@ -233,7 +233,12 @@ func (om *offsetManager) mainLoop() { } } +// flushToBroker is ignored if auto-commit offsets is disabled func (om *offsetManager) flushToBroker() { + if !om.conf.Consumer.Offsets.AutoCommit.Enable { + return + } + req := om.constructRequest() if req == nil { return @@ -275,7 +280,6 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { ConsumerID: om.memberID, ConsumerGroupGeneration: om.generation, } - } om.pomsLock.RLock() diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go index 326c3720cc7..c0b3305f661 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -6,7 +6,7 @@ type offsetRequestBlock struct { } func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(int64(b.time)) + pe.putInt64(b.time) if version == 0 { pe.putInt32(b.maxOffsets) } @@ -116,6 +116,10 @@ func (r *OffsetRequest) version() int16 { return r.Version } +func (r *OffsetRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go index 8b2193f9a0b..ead3ebbcc2c 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -150,6 +150,10 @@ func (r *OffsetResponse) version() int16 { return r.Version } +func (r *OffsetResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index 9be854c0741..ed00ba350b5 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -10,8 +10,11 @@ type packetDecoder interface { getInt32() (int32, error) getInt64() (int64, error) getVarint() (int64, error) + getUVarint() (uint64, error) getArrayLength() (int, error) + getCompactArrayLength() (int, error) getBool() (bool, error) + getEmptyTaggedFieldArray() (int, error) // Collections getBytes() ([]byte, error) @@ -19,6 +22,9 @@ type packetDecoder interface { 
getRawBytes(length int) ([]byte, error) getString() (string, error) getNullableString() (*string, error) + getCompactString() (string, error) + getCompactNullableString() (*string, error) + getCompactInt32Array() ([]int32, error) getInt32Array() ([]int32, error) getInt64Array() ([]int64, error) getStringArray() ([]string, error) diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go index 67b8daed829..50c735c0445 100644 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -12,6 +12,8 @@ type packetEncoder interface { putInt32(in int32) putInt64(in int64) putVarint(in int64) + putUVarint(in uint64) + putCompactArrayLength(in int) putArrayLength(in int) error putBool(in bool) @@ -19,11 +21,16 @@ type packetEncoder interface { putBytes(in []byte) error putVarintBytes(in []byte) error putRawBytes(in []byte) error + putCompactString(in string) error + putNullableCompactString(in *string) error putString(in string) error putNullableString(in *string) error putStringArray(in []string) error + putCompactInt32Array(in []int32) error + putNullableCompactInt32Array(in []int32) error putInt32Array(in []int32) error putInt64Array(in []int64) error + putEmptyTaggedFieldArray() // Provide the current offset to record the batch size metric offset() int diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index b633cd15111..827542c5030 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "fmt" "math" @@ -36,6 +37,11 @@ func (pe *prepEncoder) putVarint(in int64) { pe.length += binary.PutVarint(buf[:], in) } +func (pe *prepEncoder) putUVarint(in uint64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutUvarint(buf[:], in) +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -44,6 +50,10 @@ func (pe *prepEncoder) putArrayLength(in int) error { return nil } +func (pe *prepEncoder) putCompactArrayLength(in int) { + pe.putUVarint(uint64(in + 1)) +} + func (pe *prepEncoder) putBool(in bool) { pe.length++ } @@ -67,6 +77,20 @@ func (pe *prepEncoder) putVarintBytes(in []byte) error { return pe.putRawBytes(in) } +func (pe *prepEncoder) putCompactString(in string) error { + pe.putCompactArrayLength(len(in)) + return pe.putRawBytes([]byte(in)) +} + +func (pe *prepEncoder) putNullableCompactString(in *string) error { + if in == nil { + pe.putUVarint(0) + return nil + } else { + return pe.putCompactString(*in) + } +} + func (pe *prepEncoder) putRawBytes(in []byte) error { if len(in) > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} @@ -107,6 +131,27 @@ func (pe *prepEncoder) putStringArray(in []string) error { return nil } +func (pe *prepEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + pe.putUVarint(0) + return nil + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + func (pe *prepEncoder) putInt32Array(in []int32) error { err := pe.putArrayLength(len(in)) if err != 
nil { @@ -125,6 +170,10 @@ func (pe *prepEncoder) putInt64Array(in []int64) error { return nil } +func (pe *prepEncoder) putEmptyTaggedFieldArray() { + pe.putUVarint(0) +} + func (pe *prepEncoder) offset() int { return pe.length } diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go index 0c755d02b64..0034651e254 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -206,6 +206,10 @@ func (r *ProduceRequest) version() int16 { return r.Version } +func (r *ProduceRequest) headerVersion() int16 { + return 1 +} + func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: @@ -214,6 +218,8 @@ func (r *ProduceRequest) requiredVersion() KafkaVersion { return V0_10_0_0 case 3: return V0_11_0_0 + case 7: + return V2_1_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go index 4c5cd3569c6..edf978790c9 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -5,11 +5,27 @@ import ( "time" ) +// Protocol, http://kafka.apache.org/protocol.html +// v1 +// v2 = v3 = v4 +// v5 = v6 = v7 +// Produce Response (Version: 7) => [responses] throttle_time_ms +// responses => topic [partition_responses] +// topic => STRING +// partition_responses => partition error_code base_offset log_append_time log_start_offset +// partition => INT32 +// error_code => INT16 +// base_offset => INT64 +// log_append_time => INT64 +// log_start_offset => INT64 +// throttle_time_ms => INT32 + +// partition_responses in protocol type ProduceResponseBlock struct { - Err KError - Offset int64 - // only provided if Version >= 2 and the broker is configured with `LogAppendTime` - Timestamp time.Time + Err KError // v0, error_code + Offset int64 // v0, base_offset + Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime` + StartOffset int64 // v5, log_start_offset } func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -32,6 +48,13 @@ func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err erro } } + if version >= 5 { + b.StartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + return nil } @@ -49,13 +72,17 @@ func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err erro pe.putInt64(timestamp) } + if version >= 5 { + pe.putInt64(b.StartOffset) + } + return nil } type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock + Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses Version int16 - ThrottleTime time.Duration // only provided if Version >= 1 + ThrottleTime time.Duration // v1, throttle_time_ms } func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { @@ -129,6 +156,7 @@ func (r *ProduceResponse) encode(pe packetEncoder) error { } } } + if r.Version >= 1 { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } @@ -143,17 +171,12 @@ func (r *ProduceResponse) version() int16 { return r.Version } +func (r *ProduceResponse) headerVersion() int16 { + return 0 +} + func (r *ProduceResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_11_0_0 - default: - return MinVersion - } + return MinVersion } func (r *ProduceResponse) 
GetBlock(topic string, partition int32) *ProduceResponseBlock { diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index bba0f7e1f09..9c70f818006 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -13,17 +13,22 @@ type partitionSet struct { } type produceSet struct { - parent *asyncProducer - msgs map[string]map[int32]*partitionSet + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + producerID int64 + producerEpoch int16 bufferBytes int bufferCount int } func newProduceSet(parent *asyncProducer) *produceSet { + pid, epoch := parent.txnmgr.getProducerID() return &produceSet{ - msgs: make(map[string]map[int32]*partitionSet), - parent: parent, + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + producerID: pid, + producerEpoch: epoch, } } @@ -44,9 +49,10 @@ func (ps *produceSet) add(msg *ProducerMessage) error { } timestamp := msg.Timestamp - if msg.Timestamp.IsZero() { + if timestamp.IsZero() { timestamp = time.Now() } + timestamp = timestamp.Truncate(time.Millisecond) partitions := ps.msgs[msg.Topic] if partitions == nil { @@ -64,8 +70,8 @@ func (ps *produceSet) add(msg *ProducerMessage) error { Version: 2, Codec: ps.parent.conf.Producer.Compression, CompressionLevel: ps.parent.conf.Producer.CompressionLevel, - ProducerID: ps.parent.txnmgr.producerID, - ProducerEpoch: ps.parent.txnmgr.producerEpoch, + ProducerID: ps.producerID, + ProducerEpoch: ps.producerEpoch, } if ps.parent.conf.Producer.Idempotent { batch.FirstSequence = msg.sequenceNumber @@ -77,12 +83,17 @@ func (ps *produceSet) add(msg *ProducerMessage) error { } partitions[msg.Partition] = set } - set.msgs = append(set.msgs, msg) if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { return errors.New("assertion failed: message out of sequence added to a batch") } + } + + // Past this point we can't return an error, because we've already added the message to the set. 
+ set.msgs = append(set.msgs, msg) + + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { // We are being conservative here to avoid having to prep encode the record size += maximumRecordOverhead rec := &Record{ @@ -128,6 +139,10 @@ func (ps *produceSet) buildRequest() *ProduceRequest { req.Version = 3 } + if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + req.Version = 7 + } + for topic, partitionSets := range ps.msgs { for partition, set := range partitionSets { if req.Version >= 3 { diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index 085cbb3cf89..8ac576db2a0 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -7,11 +7,11 @@ import ( var errInvalidArrayLength = PacketDecodingError{"invalid array length"} var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} -var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} var errInvalidStringLength = PacketDecodingError{"invalid string length"} -var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errUVarintOverflow = PacketDecodingError{"uvarint overflow"} var errInvalidBool = PacketDecodingError{"invalid bool"} +var errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"} type realDecoder struct { raw []byte @@ -75,6 +75,22 @@ func (rd *realDecoder) getVarint() (int64, error) { return tmp, nil } +func (rd *realDecoder) getUVarint() (uint64, error) { + tmp, n := binary.Uvarint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + if n < 0 { + rd.off -= n + return 0, errUVarintOverflow + } + + rd.off += n + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -91,6 +107,19 @@ func (rd *realDecoder) getArrayLength() (int, error) { return tmp, nil } +func (rd *realDecoder) getCompactArrayLength() (int, error) { + n, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if n == 0 { + return 0, nil + } + + return int(n) - 1, nil +} + func (rd *realDecoder) getBool() (bool, error) { b, err := rd.getInt8() if err != nil || b == 0 { @@ -102,6 +131,19 @@ func (rd *realDecoder) getBool() (bool, error) { return true, nil } +func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { + tagCount, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if tagCount != 0 { + return 0, errUnsupportedTaggedFields + } + + return 0, nil +} + // collections func (rd *realDecoder) getBytes() ([]byte, error) { @@ -169,6 +211,58 @@ func (rd *realDecoder) getNullableString() (*string, error) { return &tmpStr, err } +func (rd *realDecoder) getCompactString() (string, error) { + n, err := rd.getUVarint() + if err != nil { + return "", err + } + + var length = int(n - 1) + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return tmpStr, nil +} + +func (rd *realDecoder) getCompactNullableString() (*string, error) { + n, err := rd.getUVarint() + + if err != nil { + return nil, err + } + + var length = int(n - 1) + + if length < 0 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return &tmpStr, err +} + +func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { + n, err := rd.getUVarint() + if err 
!= nil { + return nil, err + } + + if n == 0 { + return nil, nil + } + + arrayLength := int(n) - 1 + + ret := make([]int32, arrayLength) + + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index 3c75387f779..ba073f7d38a 100644 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "github.com/rcrowley/go-metrics" ) @@ -39,11 +40,20 @@ func (re *realEncoder) putVarint(in int64) { re.off += binary.PutVarint(re.raw[re.off:], in) } +func (re *realEncoder) putUVarint(in uint64) { + re.off += binary.PutUvarint(re.raw[re.off:], in) +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil } +func (re *realEncoder) putCompactArrayLength(in int) { + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(in + 1)) +} + func (re *realEncoder) putBool(in bool) { if in { re.putInt8(1) @@ -78,6 +88,19 @@ func (re *realEncoder) putVarintBytes(in []byte) error { return re.putRawBytes(in) } +func (re *realEncoder) putCompactString(in string) error { + re.putCompactArrayLength(len(in)) + return re.putRawBytes([]byte(in)) +} + +func (re *realEncoder) putNullableCompactString(in *string) error { + if in == nil { + re.putInt8(0) + return nil + } + return re.putCompactString(*in) +} + func (re *realEncoder) putString(in string) error { re.putInt16(int16(len(in))) copy(re.raw[re.off:], in) @@ -108,6 +131,31 @@ func (re *realEncoder) putStringArray(in []string) error { return nil } +func (re *realEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + re.putUVarint(0) + return nil + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + func (re *realEncoder) putInt32Array(in []int32) error { err := re.putArrayLength(len(in)) if err != nil { @@ -130,6 +178,10 @@ func (re *realEncoder) putInt64Array(in []int64) error { return nil } +func (re *realEncoder) putEmptyTaggedFieldArray() { + re.putUVarint(0) +} + func (re *realEncoder) offset() int { return re.off } diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go index 98160c7394d..f4c5e95f1de 100644 --- a/vendor/github.com/Shopify/sarama/records.go +++ b/vendor/github.com/Shopify/sarama/records.go @@ -8,7 +8,6 @@ const ( defaultRecords magicOffset = 16 - magicLength = 1 ) // Records implements a union type containing either a RecordBatch or a legacy MessageSet. 
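The compact ("flexible") encodings added above all follow the same KIP-482 convention: lengths are written as unsigned varints holding N+1, which frees the value 0 to mean null (hence the `// 0 represents a null array, so +1 has to be added` comments). A self-contained sketch of that convention for strings, independent of sarama's packetEncoder/packetDecoder plumbing; the helper names are mine:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// putCompactString appends a COMPACT_STRING: a uvarint holding len+1,
// followed by the raw bytes. A leading uvarint of 0 is reserved for null.
func putCompactString(buf []byte, s string) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(s))+1)
	return append(append(buf, tmp[:n]...), s...)
}

// getCompactString decodes one COMPACT_STRING and returns the rest of buf.
func getCompactString(buf []byte) (string, []byte, error) {
	l, n := binary.Uvarint(buf)
	if n <= 0 {
		return "", nil, errors.New("invalid uvarint")
	}
	if l == 0 {
		return "", nil, errors.New("null string") // 0 encodes null
	}
	length := int(l - 1)
	buf = buf[n:]
	if len(buf) < length {
		return "", nil, errors.New("insufficient data")
	}
	return string(buf[:length]), buf[length:], nil
}

func main() {
	buf := putCompactString(nil, "sarama")
	s, rest, err := getCompactString(buf)
	fmt.Println(s, len(rest), err) // sarama 0 <nil>
}
```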
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index 97437d67bd4..dcfd3946c81 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -11,6 +11,7 @@ type protocolBody interface { versionedDecoder key() int16 version() int16 + headerVersion() int16 requiredVersion() KafkaVersion } @@ -26,12 +27,19 @@ func (r *request) encode(pe packetEncoder) error { pe.putInt16(r.body.version()) pe.putInt32(r.correlationID) - err := pe.putString(r.clientID) - if err != nil { - return err + if r.body.headerVersion() >= 1 { + err := pe.putString(r.clientID) + if err != nil { + return err + } + } + + if r.body.headerVersion() >= 2 { + // we don't use tag headers at the moment so we just put an array length of 0 + pe.putUVarint(0) } - err = r.body.encode(pe) + err := r.body.encode(pe) if err != nil { return err } @@ -65,6 +73,14 @@ func (r *request) decode(pd packetDecoder) (err error) { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } + if r.body.headerVersion() >= 2 { + // tagged field + _, err = pd.getUVarint() + if err != nil { + return err + } + } + return r.body.decode(pd, version) } @@ -105,7 +121,7 @@ func allocateBody(key, version int16) protocolBody { case 0: return &ProduceRequest{} case 1: - return &FetchRequest{} + return &FetchRequest{Version: version} case 2: return &OffsetRequest{Version: version} case 3: @@ -166,6 +182,10 @@ func allocateBody(key, version int16) protocolBody { return &CreatePartitionsRequest{} case 42: return &DeleteGroupsRequest{} + case 45: + return &AlterPartitionReassignmentsRequest{} + case 46: + return &ListPartitionReassignmentsRequest{} } return nil } diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go index 7a759185187..5dffb75be65 100644 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -10,7 +10,7 @@ type responseHeader struct { correlationID int32 } -func (r *responseHeader) decode(pd packetDecoder) (err error) { +func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) { r.length, err = pd.getInt32() if err != nil { return err @@ -20,5 +20,12 @@ func (r *responseHeader) decode(pd packetDecoder) (err error) { } r.correlationID, err = pd.getInt32() + + if version >= 1 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return err } diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index 1e0277aebd5..48f362d287e 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -39,6 +39,10 @@ Broker related metrics: | response-rate-for-broker- | meter | Responses/second received from a given broker | | response-size | histogram | Distribution of the response size in bytes for all brokers | | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | +----------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of 
the "all brokers" metrics. diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go index 54c8b0992e7..90504df6f52 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -24,6 +24,10 @@ func (r *SaslAuthenticateRequest) version() int16 { return 0 } +func (r *SaslAuthenticateRequest) headerVersion() int16 { + return 1 +} + func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go index 0038c3f3665..3ef57b5afad 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -39,6 +39,10 @@ func (r *SaslAuthenticateResponse) version() int16 { return 0 } +func (r *SaslAuthenticateResponse) headerVersion() int16 { + return 0 +} + func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go index fe5ba05048b..74dc3072f48 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -29,6 +29,10 @@ func (r *SaslHandshakeRequest) version() int16 { return r.Version } +func (r *SaslHandshakeRequest) headerVersion() int16 { + return 1 +} + func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go index ef290d4bc6d..69dfc3178ec 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -33,6 +33,10 @@ func (r *SaslHandshakeResponse) version() int16 { return 0 } +func (r *SaslHandshakeResponse) headerVersion() int16 { + return 0 +} + func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go index fe207080e03..ac6ecb13e04 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -77,6 +77,10 @@ func (r *SyncGroupRequest) version() int16 { return 0 } +func (r *SyncGroupRequest) headerVersion() int16 { + return 1 +} + func (r *SyncGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go index 194b382b4ab..af019c42f97 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -36,6 +36,10 @@ func (r *SyncGroupResponse) version() int16 { return 0 } +func (r *SyncGroupResponse) headerVersion() int16 { + return 0 +} + func (r *SyncGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go index 71e95b814cb..c4043a33520 100644 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go +++ 
b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -91,6 +91,10 @@ func (a *TxnOffsetCommitRequest) version() int16 { return 0 } +func (a *TxnOffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go index 6c980f4066f..94d8029dace 100644 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -78,6 +78,10 @@ func (a *TxnOffsetCommitResponse) version() int16 { return 0 } +func (a *TxnOffsetCommitResponse) headerVersion() int16 { + return 0 +} + func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 7c815cd3a31..d138a5eb321 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -26,9 +26,7 @@ func (slice int32Slice) Swap(i, j int) { func dupInt32Slice(input []int32) []int32 { ret := make([]int32, 0, len(input)) - for _, val := range input { - ret = append(ret, val) - } + ret = append(ret, input...) return ret } @@ -161,6 +159,8 @@ var ( V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) + V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) + V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -185,9 +185,11 @@ var ( V2_1_0_0, V2_2_0_0, V2_3_0_0, + V2_4_0_0, + V2_5_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V2_3_0_0 + MaxVersion = V2_5_0_0 ) //ParseKafkaVersion parses and returns kafka version or error from a string diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go index 58880e2b4b5..7c9951acc8a 100644 --- a/vendor/github.com/Shopify/sarama/zstd.go +++ b/vendor/github.com/Shopify/sarama/zstd.go @@ -1,8 +1,9 @@ package sarama import ( - "github.com/klauspost/compress/zstd" "sync" + + "github.com/klauspost/compress/zstd" ) var ( diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go index 911227f6129..0c10c4e9f5f 100644 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -4,22 +4,40 @@ import ( "crypto/rand" "encoding/hex" "fmt" + "io" ) // GenerateRandomBytes is used to generate random bytes of given size. func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. 
+func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } buf := make([]byte, size) - if _, err := rand.Read(buf); err != nil { + if _, err := io.ReadFull(reader, buf); err != nil { return nil, fmt.Errorf("failed to read random bytes: %v", err) } return buf, nil } + const uuidLen = 16 // GenerateUUID is used to generate a random UUID func GenerateUUID() (string, error) { - buf, err := GenerateRandomBytes(uuidLen) + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) if err != nil { return "", err } diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 20c94f59684..d9948ab4098 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -644,7 +644,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.fill = (*compressor).fillBlock d.step = (*compressor).store case level == ConstantCompression: - d.w.logReusePenalty = uint(4) + d.w.logNewTablePenalty = 4 d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeHuff @@ -652,13 +652,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) { level = 5 fallthrough case level >= 1 && level <= 6: - d.w.logReusePenalty = uint(level + 1) + d.w.logNewTablePenalty = 6 d.fast = newFastEnc(level) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeFast case 7 <= level && level <= 9: - d.w.logReusePenalty = uint(level) + d.w.logNewTablePenalty = 10 d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index dd74ffb8723..9feea87a3d5 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -93,12 +93,12 @@ type huffmanBitWriter struct { err error lastHeader int // Set between 0 (reused block can be up to 2x the size) - logReusePenalty uint - lastHuffMan bool - bytes [256]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 + logNewTablePenalty uint + lastHuffMan bool + bytes [256]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 // codegen must have an extra space for the final symbol. codegen [literalCount + offsetCodeCount + 1]uint8 @@ -119,7 +119,7 @@ type huffmanBitWriter struct { // If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. // // An incoming block estimates the output size of a new table by calculating the -// optimal size and adding a penalty in 'logReusePenalty'. +// optimal size and adding a penalty in 'logNewTablePenalty'. // A Huffman table is not optimal, which is why we add a penalty, and generating a new table // is slower both for compression and decompression.
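The penalty described above is a plain arithmetic shift: the estimated cost of emitting a fresh Huffman table is inflated by size >> logNewTablePenalty before being compared with the cost of reusing the previous table, so a larger penalty exponent means a smaller surcharge and less bias toward reuse. A quick illustrative calculation (not part of the patch) using the three values this diff assigns, 4 for ConstantCompression, 6 for levels 1-6, and 10 for levels 7-9:

```go
package main

import "fmt"

func main() {
	// Estimated optimal size in bits of a block with a brand-new table.
	newSize := 10000

	// newSize >> penalty is added as a surcharge, nudging the writer
	// toward reusing the previous table when the gain would be marginal.
	for _, penalty := range []uint{4, 6, 10} {
		surcharge := newSize >> penalty
		fmt.Printf("logNewTablePenalty=%2d -> %d bits (+%.2f%%)\n",
			penalty, newSize+surcharge, 100*float64(surcharge)/float64(newSize))
	}
}
```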
@@ -349,6 +349,13 @@ func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { int(w.codegenFreq[18])*7, numCodegens } +// dynamicReuseSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { + size = litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + return size +} + // dynamicSize returns the size of dynamically encoded data in bits. func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { header, numCodegens := w.headerSize() @@ -451,12 +458,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n i := 0 for { - var codeWord int = int(w.codegen[i]) + var codeWord = uint32(w.codegen[i]) i++ if codeWord == badCode { break } - w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + w.writeCode(w.codegenEncoding.codes[codeWord]) switch codeWord { case 16: @@ -602,14 +609,14 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b var size int // Check if we should reuse. if w.lastHeader > 0 { - // Estimate size for using a new table + // Estimate size for using a new table. + // Use the previous header size as the best estimate. newSize := w.lastHeader + tokens.EstimatedBits() + newSize += newSize >> w.logNewTablePenalty // The estimated size is calculated as an optimal table. // We add a penalty to make it more realistic and re-use a bit more. - newSize += newSize >> (w.logReusePenalty & 31) - extra := w.extraBitSize() - reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra) + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize() // Check if a new table is better. if newSize < reuseSize { @@ -801,21 +808,30 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { } // Add everything as literals - estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15 + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync) + estBits += w.lastHeader + 15 + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement.
ssize, storable := w.storedSize(input) - if storable && ssize < (estBits+estBits>>4) { + if storable && ssize < estBits { w.writeStoredHeader(len(input), eof) w.writeBytes(input) return } if w.lastHeader > 0 { - size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader) - estBits += estBits >> (w.logReusePenalty) + reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256]) + estBits += estExtra - if estBits < size { + if estBits < reuseSize { // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 1810c6898d0..9d8e81ad690 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -7,7 +7,6 @@ package flate import ( "math" "math/bits" - "sort" ) const ( @@ -25,8 +24,6 @@ type huffmanEncoder struct { codes []hcode freqcache []literalNode bitCount [17]int32 - lns byLiteral // stored to avoid repeated allocation in generate - lfs byFreq // stored to avoid repeated allocation in generate } type literalNode struct { @@ -270,7 +267,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // assigned in literal order (not frequency order). chunk := list[len(list)-int(bits):] - h.lns.sort(chunk) + sortByLiteral(chunk) for _, node := range chunk { h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} code++ @@ -315,7 +312,7 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { } return } - h.lfs.sort(list) + sortByFreq(list) // Get the number of literals for each bit count bitCount := h.bitCounts(list, maxBits) @@ -323,59 +320,44 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { h.assignEncodingAndSize(bitCount, list) } -type byLiteral []literalNode - -func (s *byLiteral) sort(a []literalNode) { - *s = byLiteral(a) - sort.Sort(s) -} - -func (s byLiteral) Len() int { return len(s) } - -func (s byLiteral) Less(i, j int) bool { - return s[i].literal < s[j].literal -} - -func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type byFreq []literalNode - -func (s *byFreq) sort(a []literalNode) { - *s = byFreq(a) - sort.Sort(s) -} - -func (s byFreq) Len() int { return len(s) } - -func (s byFreq) Less(i, j int) bool { - if s[i].freq == s[j].freq { - return s[i].literal < s[j].literal +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 } - return s[i].freq < s[j].freq + return v } -func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // histogramSize accumulates a histogram of b in h. // An estimated size in bits is returned. // Unassigned values are assigned '1' in the histogram. // len(h) must be >= 256, and h's elements must be all zeroes. 
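// A hypothetical call against the new two-result form below (illustration
// only):
//
//	var h [256]uint16
//	bits, extra := histogramSize(data, h[:], true)
//	estBits := bits + extra // entropy of seen bytes + cost of filled-in ones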
-func histogramSize(b []byte, h []uint16, fill bool) int { +func histogramSize(b []byte, h []uint16, fill bool) (int, int) { h = h[:256] for _, t := range b { h[t]++ } - invTotal := 1.0 / float64(len(b)) - shannon := 0.0 - single := math.Ceil(-math.Log2(invTotal)) - for i, v := range h[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } else if fill { - shannon += single - h[i] = 1 + invTotal := 1.0 / float32(len(b)) + shannon := float32(0.0) + var extra float32 + if fill { + oneBits := atLeastOne(-mFastLog2(invTotal)) + for i, v := range h[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } else { + h[i] = 1 + extra += oneBits + } + } + } else { + for _, v := range h[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } } } - return int(shannon + 0.99) + + return int(shannon + 0.99), int(extra + 0.99) } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 00000000000..20778029900 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,178 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +// siftDownByFreq implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDownByFreq(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { + child++ + } + if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. 
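+	// (Aside: a plain (lo+hi)/2 could overflow int when lo+hi exceeds
+	// math.MaxInt; converting through uint keeps the sum well-defined, and
+	// the shift still yields the midpoint.)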
+ if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. + protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. 
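+// The ordering is by freq with ties broken by literal, so for hypothetical
+// nodes (freq, literal) = (5, 'a'), (2, 'x'), (5, 'b') the order is
+// (2,'x') < (5,'a') < (5,'b'), making (5,'a') the median.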
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 00000000000..93f1aea109e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. 
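+		// (The ninther samples nine elements as three triples, takes the median
+		// of each triple, then the median of those three medians; a cheap,
+		// outlier-resistant pivot estimate for slices longer than 40 elements.)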
+ s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. + protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. 
+func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index b3df0d8941e..099c0ddbc42 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -184,9 +184,7 @@ func (t *tokens) indexTokens(in []token) { t.Reset() for _, tok := range in { if tok < matchType { - t.tokens[t.n] = tok - t.litHist[tok]++ - t.n++ + t.AddLiteral(tok.literal()) continue } t.AddMatch(uint32(tok.length()), tok.offset()) @@ -211,43 +209,53 @@ func (t *tokens) AddLiteral(lit byte) { t.nLits++ } +// from https://stackoverflow.com/a/28730362 +func mFastLog2(val float32) float32 { + ux := int32(math.Float32bits(val)) + log2 := (float32)(((ux >> 23) & 255) - 128) + ux &= -0x7f800001 + ux += 127 << 23 + uval := math.Float32frombits(uint32(ux)) + log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 + return log2 +} + // EstimatedBits will return an minimum size estimated by an *optimal* // compression of the block. // The size of the block func (t *tokens) EstimatedBits() int { - shannon := float64(0) + shannon := float32(0) bits := int(0) nMatches := 0 if t.nLits > 0 { - invTotal := 1.0 / float64(t.nLits) + invTotal := 1.0 / float32(t.nLits) for _, v := range t.litHist[:] { if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) + n := float32(v) + shannon += -mFastLog2(n*invTotal) * n } } // Just add 15 for EOB shannon += 15 - for _, v := range t.extraHist[1 : literalCount-256] { + for i, v := range t.extraHist[1 : literalCount-256] { if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - bits += int(lengthExtraBits[v&31]) * int(v) + n := float32(v) + shannon += -mFastLog2(n*invTotal) * n + bits += int(lengthExtraBits[i&31]) * int(v) nMatches += int(v) } } } if nMatches > 0 { - invTotal := 1.0 / float64(nMatches) - for _, v := range t.offHist[:offsetCodeCount] { + invTotal := 1.0 / float32(nMatches) + for i, v := range t.offHist[:offsetCodeCount] { if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - bits += int(offsetExtraBits[v&31]) * int(n) + n := float32(v) + shannon += -mFastLog2(n*invTotal) * n + bits += int(offsetExtraBits[i&31]) * int(v) } } } - return int(shannon) + bits } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index ec0c3fc53b5..bda4021efd3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -38,7 +38,7 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
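// A typical calling pattern (sketch only, mirroring compress1xDo further
// down): flush the 64-bit container to at most 32 pending bits, then emit a
// batch whose total width fits the remaining space:
//
//	bw.flush32()
//	bw.encSymbol(cTable, b1)
//	bw.encSymbol(cTable, b0)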
func (b *bitWriter) encSymbol(ct cTable, symbol byte) { enc := ct[symbol] @@ -46,6 +46,17 @@ func (b *bitWriter) encSymbol(ct cTable, symbol byte) { b.nBits += enc.nBits } +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + b.nBits += encA.nBits + encB.nBits +} + // addBits16ZeroNC will add up to 16 bits. // It will not check if there is space for them, // so the caller must ensure that it has flushed recently. diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 51e00aaeb2f..0843cb014ff 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -80,9 +80,12 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) if s.Reuse == ReusePolicyPrefer && canReuse { keepTable := s.cTable + keepTL := s.actualTableLog s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog s.Out, err = compressor(in) s.cTable = keepTable + s.actualTableLog = keepTL if err == nil && len(s.Out) < wantSize { s.OutData = s.Out return s.Out, true, nil @@ -92,7 +95,6 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) } // Calculate new table. - s.optimalTableLog() err = s.buildCTable() if err != nil { return nil, false, err @@ -109,9 +111,15 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) if oldSize <= hSize+newSize || hSize+12 >= wantSize { // Retain cTable even if we re-use. keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog s.Out, err = compressor(in) + + // Restore ctable. s.cTable = keepTable + s.actualTableLog = keepTL if err != nil { return nil, false, err } @@ -142,7 +150,7 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) return nil, false, ErrIncompressible } // Move current table into previous. 
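	// (The paired tablelog bookkeeping added above matters here: a reused
	// prevTable is only valid together with the tablelog it was built for,
	// which is presumably what prevTableLog now preserves.)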
- s.prevTable, s.cTable = s.cTable, s.prevTable[:0] + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] s.OutData = s.Out[len(s.OutTable):] return s.Out, false, nil } @@ -163,28 +171,23 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { for i := len(src) & 3; i > 0; i-- { bw.encSymbol(cTable, src[n+i-1]) } + n -= 4 if s.actualTableLog <= 8 { - n -= 4 for ; n >= 0; n -= 4 { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encSymbol(cTable, tmp[3]) - bw.encSymbol(cTable, tmp[2]) - bw.encSymbol(cTable, tmp[1]) - bw.encSymbol(cTable, tmp[0]) + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) } } else { - n -= 4 for ; n >= 0; n -= 4 { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encSymbol(cTable, tmp[3]) - bw.encSymbol(cTable, tmp[2]) + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) bw.flush32() - bw.encSymbol(cTable, tmp[1]) - bw.encSymbol(cTable, tmp[0]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) } } err := bw.close() @@ -322,9 +325,26 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + // minTableLog provides the minimum logSize to safely represent a distribution. func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain()-1)) + 1 + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 if minBitsSrc < minBitsSymbols { return uint8(minBitsSrc) @@ -336,7 +356,7 @@ func (s *Scratch) minTableLog() uint8 { func (s *Scratch) optimalTableLog() { tableLog := s.TableLog minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 2 + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc @@ -363,6 +383,7 @@ type cTableEntry struct { const huffNodesMask = huffNodesLen - 1 func (s *Scratch) buildCTable() error { + s.optimalTableLog() s.huffSort() if cap(s.cTable) < maxSymbolValue+1 { s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) @@ -439,7 +460,7 @@ func (s *Scratch) buildCTable() error { return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) } var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { nbPerRank[v.nbBits]++ } @@ -455,16 +476,17 @@ func (s *Scratch) buildCTable() error { } // push nbBits per symbol, symbol order - // TODO: changed `s.symbolLen` -> `nonNullRank+1` (micro-opt) for _, v := range huffNode[:nonNullRank+1] { s.cTable[v.symbol].nBits = v.nbBits } // assign value within rank, symbol order - for n, val := range s.cTable[:s.symbolLen] { - v := valPerRank[val.nBits] - s.cTable[n].val = v - valPerRank[val.nBits] = v + 1 + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 } return nil @@ -488,10 +510,12 @@ func (s *Scratch) huffSort() { r := highBit32(v+1) & 31 rank[r].base++ } - for n := 30; n > 0; n-- { + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { rank[n-1].base += rank[n].base } - for n := 
range rank[:] {
+	for n := range rank[:maxBitLength] {
 		rank[n].current = rank[n].base
 	}
 	for n, c := range s.count[:s.symbolLen] {
@@ -510,7 +534,7 @@ func (s *Scratch) huffSort() {
 }
 
 func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
-	maxNbBits := s.TableLog
+	maxNbBits := s.actualTableLog
 	huffNode := s.nodes[1 : huffNodesLen+1]
 	//huffNode = huffNode[: huffNodesLen]
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 6bc23bbf00f..53249df0561 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -83,7 +83,7 @@ type Scratch struct {
 	MaxSymbolValue uint8
 
 	// TableLog will attempt to override the tablelog for the next block.
-	// Must be <= 11.
+	// Must be <= 11 and >= 5.
 	TableLog uint8
 
 	// Reuse will specify the reuse policy
@@ -105,6 +105,7 @@ type Scratch struct {
 	maxCount       int    // count of the most probable symbol
 	clearCount     bool   // clear count
 	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
 	prevTable      cTable // Table used for previous compression.
 	cTable         cTable // compression table
 	dt             dTable // decompression table
@@ -127,8 +128,8 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.TableLog == 0 {
 		s.TableLog = tableLogDefault
 	}
-	if s.TableLog > tableLogMax {
-		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
 	}
 	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
 		s.MaxDecodedSize = BlockSizeMax
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index d9d38b23f16..bc977a30234 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -26,13 +26,17 @@ Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
 
 ### Status:
 
-BETA - there may still be subtle bugs, but a wide variety of content has been tested.
-There may still be implementation specific stuff in regards to error handling that could lead to edge cases.
+STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively
+used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
 
 For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
 
 The "Fastest" compression ratio is roughly equivalent to zstd level 1.
-The "Default" compression ration is roughly equivalent to zstd level 3 (default).
+The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
 
 In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
 The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
 
@@ -251,8 +255,12 @@ The converter `s` can be reused to avoid allocations, even after errors.
 
 ## Decompressor
 
-STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
 
 ### Usage
 
@@ -382,4 +390,4 @@ For sending files for reproducing errors use a service like [goobox](https://goo
 For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
 
-This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
\ No newline at end of file
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 47cc21d6d3d..ed670bcc7ad 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -89,6 +89,7 @@ type blockDec struct {
 	sequenceBuf []seq
 	tmp         [4]byte
 	err         error
+	decWG       sync.WaitGroup
 }
 
 func (b *blockDec) String() string {
@@ -105,6 +106,7 @@ func newBlockDec(lowMem bool) *blockDec {
 		input:   make(chan struct{}, 1),
 		history: make(chan *history, 1),
 	}
+	b.decWG.Add(1)
 	go b.startDecoder()
 	return &b
 }
@@ -183,11 +185,13 @@ func (b *blockDec) Close() {
 	close(b.input)
 	close(b.history)
 	close(b.result)
+	b.decWG.Wait()
 }
 
 // decodeAsync will prepare decoding the block when it receives input.
 // This will separate output and history.
 func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
 	for range b.input {
 		//println("blockDec: Got block input")
 		switch b.Type {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 8383279d272..507757d525a 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -299,14 +299,28 @@ func (b *blockEnc) encodeRaw(a []byte) {
 	}
 }
 
+// encodeRawTo appends a raw (uncompressed) block representation of src to dst and returns the result.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src))
+	}
+	return dst
+}
+
 // encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits() error {
+func (b *blockEnc) encodeLits(raw bool) error {
 	var bh blockHeader
 	bh.setLast(b.last)
 	bh.setSize(uint32(len(b.literals)))
 
 	// Don't compress extremely small blocks
-	if len(b.literals) < 32 {
+	if len(b.literals) < 32 || raw {
 		if debug {
 			println("Adding RAW block, length", len(b.literals))
 		}
@@ -324,18 +338,10 @@ func (b *blockEnc) encodeLits() error {
 	if len(b.literals) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			// Bail out of compression is too little.
-			err = huff0.ErrIncompressible
-		}
 	} else if len(b.literals) > 32 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			// Bail out of compression is too little.
- err = huff0.ErrIncompressible - } } else { err = huff0.ErrIncompressible } @@ -437,10 +443,10 @@ func fuzzFseEncoder(data []byte) int { return 1 } -// encode will encode the block and put the output in b.output. -func (b *blockEnc) encode() error { +// encode will encode the block and append the output in b.output. +func (b *blockEnc) encode(raw bool) error { if len(b.sequences) == 0 { - return b.encodeLits() + return b.encodeLits(raw) } // We want some difference if len(b.literals) > (b.size - (b.size >> 5)) { @@ -451,6 +457,8 @@ func (b *blockEnc) encode() error { var lh literalsHeader bh.setLast(b.last) bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) b.output = bh.appendTo(b.output) var ( @@ -458,16 +466,17 @@ func (b *blockEnc) encode() error { reUsed, single bool err error ) - if len(b.literals) >= 1024 { + if len(b.literals) >= 1024 && !raw { // Use 4 Streams. out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 { + } else if len(b.literals) > 32 && !raw { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) } else { err = huff0.ErrIncompressible } + switch err { case huff0.ErrIncompressible: lh.setType(literalsBlockRaw) @@ -735,18 +744,18 @@ func (b *blockEnc) encode() error { } b.output = wr.out - if len(b.output)-3 >= b.size { + if len(b.output)-3-bhOffset >= b.size { // Maybe even add a bigger margin. b.litEnc.Reuse = huff0.ReusePolicyNone return errIncompressible } // Size is output minus block header. - bh.setSize(uint32(len(b.output)) - 3) + bh.setSize(uint32(len(b.output)-bhOffset) - 3) if debug { println("Rewriting block header", bh) } - _ = bh.appendTo(b.output[:0]) + _ = bh.appendTo(b.output[bhOffset:bhOffset]) b.coders.setPrev(llEnc, mlEnc, ofEnc) return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 1de94eef0c0..35a3cda9140 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -388,6 +388,35 @@ func (d *Decoder) Close() { d.current.err = ErrDecoderClosed } +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. 
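+// Closing the wrapper closes the underlying Decoder, so a hypothetical caller
+// might write:
+//
+//	rc := dec.IOReadCloser()
+//	defer rc.Close()
+//	_, err := io.Copy(dst, rc)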
+func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + type decodeOutput struct { d *blockDec b []byte diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 2f41bcd0d5b..ee3b09b02a8 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -411,3 +411,316 @@ encodeLoop: println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur > (1<<30)+e.maxMatchOff { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + stepSize := int32(e.o.targetLength) + if stepSize == 0 { + stepSize++ + } + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
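+					// (In the zstd format, a sequence with zero literals shifts the
+					// meaning of the repeat-offset codes, so keeping at least one
+					// literal avoids that remapping.)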
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debug && s <= t { + panic("s <= t") + } + if debug && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debug && s <= t { + panic("s <= t") + } + if debug && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debug && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debug && s <= t { + panic("s <= t") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 6f388de041f..0bdddac5b4d 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -329,6 +329,246 @@ encodeLoop: } } +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debug { + if len(src) > maxBlockSize { + panic("src too big") + } + } + // Protect against e.cur wraparound. + if e.cur > (1<<30)+e.maxMatchOff { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. 
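+	// (With kSearchStrength = 8 the no-match path advances by
+	// stepSize + ((s - nextEmit) >> 7): every 128 bytes scanned without a
+	// match adds one more byte of skip, so incompressible input is crossed
+	// progressively faster.)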
+ const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + // lenght := 4 + e.matchlen(s+6, repIndex+4, src) + lenght := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debug && s <= t { + panic("s <= t") + } + if debug && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debug && s <= t { + panic("s <= t") + } + if debug && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debug && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debug && s <= t { + panic("s <= t") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. 
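+			// (The sequence below stores its offset as (s-t)+3 because encoded
+			// offset values 1-3 are reserved for the zstd repeat-offset codes.)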
+ var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + func (e *fastEncoder) addBlock(src []byte) int32 { // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { @@ -362,6 +602,11 @@ func (e *fastEncoder) UseBlock(enc *blockEnc) { e.blk = enc } +func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 { + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 { if debug { if s < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index d79188271a8..366dd66bde9 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -29,6 +29,7 @@ type Encoder struct { type encoder interface { Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) Block() *blockEnc CRC() *xxhash.Digest AppendCRC([]byte) []byte @@ -262,7 +263,7 @@ func (e *Encoder) nextBlock(final bool) error { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode() + err = blk.encode(e.o.noEntropy) } switch err { case errIncompressible: @@ -433,7 +434,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { }() enc.Reset() blk := enc.Block() - single := len(src) > 1<<20 + // Use single segments when above minimum window and below 1MB. + single := len(src) < 1<<20 && len(src) > MinWindowSize if e.o.single != nil { single = *e.o.single } @@ -454,26 +456,23 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { panic(err) } - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] + if len(src) <= e.o.blockSize && len(src) <= maxBlockSize { + // Slightly faster with no history and everything in one block. 
if e.o.crc { - _, _ = enc.CRC().Write(todo) + _, _ = enc.CRC().Write(src) } blk.reset(nil) - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := errIncompressible + blk.last = true + enc.EncodeNoHist(blk, src) + // If we got the exact same number of literals as input, // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode() + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(e.o.noEntropy) } switch err { @@ -481,13 +480,49 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if debug { println("Storing incompressible block as raw") } - blk.encodeRaw(todo) - blk.popOffsets() + dst = blk.encodeRawTo(dst, src) case nil: + dst = blk.output default: panic(err) } - dst = append(dst, blk.output...) + blk.output = oldout + } else { + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.reset(nil) + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(e.o.noEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + } } if e.o.crc { dst = enc.AppendCRC(dst) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 0f83a325a98..40eb457331a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -20,6 +20,7 @@ type encoderOptions struct { windowSize int level EncoderLevel fullZero bool + noEntropy bool } func (o *encoderOptions) setDefault() { @@ -202,6 +203,16 @@ func WithZeroFrames(b bool) EOption { } } +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + // WithSingleSegment will set the "single segment" flag when EncodeAll is used. // If this flag is set, data must be regenerated within a single continuous memory segment. // In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. 
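As a usage sketch (not part of the patch; the sample data and variable names
are hypothetical), the new option plugs into the EOption list accepted by
zstd.NewWriter, and EncodeAll can be used without an underlying writer:

	package main

	import "github.com/klauspost/compress/zstd"

	func main() {
		// Skip entropy coding of literals; matches are still emitted, so this
		// trades a little ratio for speed on literal-heavy content.
		enc, err := zstd.NewWriter(nil, zstd.WithNoEntropyCompression(true))
		if err != nil {
			panic(err)
		}
		defer enc.Close()
		compressed := enc.EncodeAll([]byte("sample data"), nil)
		_ = compressed
	}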
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index a048818f9a4..356956ba256 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -111,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { // Add empty last block r.block.reset(nil) r.block.last = true - err := r.block.encodeLits() + err := r.block.encodeLits(false) if err != nil { return written, err } @@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.err = ErrSnappyCorrupt return written, r.err } - err = r.block.encode() + err = r.block.encode(false) switch err { case errIncompressible: r.block.popOffsets() @@ -188,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { println("snappy.Decode:", err) return written, err } - err = r.block.encodeLits() + err = r.block.encodeLits(false) if err != nil { return written, err } @@ -235,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.err = ErrSnappyCorrupt return written, r.err } - err := r.block.encodeLits() + err := r.block.encodeLits(false) if err != nil { return written, err } diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md index 00899fb7e04..4ee388e81bf 100644 --- a/vendor/github.com/pierrec/lz4/README.md +++ b/vendor/github.com/pierrec/lz4/README.md @@ -83,23 +83,8 @@ Contributions are very welcome for bug fixing, performance improvements...! ## Contributors -Thanks to all contributors so far: - -- [@klauspost](https://github.com/klauspost) -- [@heidawei](https://github.com/heidawei) -- [@x4m](https://github.com/x4m) -- [@Zariel](https://github.com/Zariel) -- [@edwingeng](https://github.com/edwingeng) -- [@danielmoy-google](https://github.com/danielmoy-google) -- [@honda-tatsuya](https://github.com/honda-tatsuya) -- [@h8liu](https://github.com/h8liu) -- [@sbinet](https://github.com/sbinet) -- [@fingon](https://github.com/fingon) -- [@emfree](https://github.com/emfree) -- [@lhemala](https://github.com/lhemala) -- [@connor4312](https://github.com/connor4312) -- [@oov](https://github.com/oov) -- [@arya](https://github.com/arya) -- [@ikkeps](https://github.com/ikkeps) - -Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go index 5755cda2460..b589af46787 100644 --- a/vendor/github.com/pierrec/lz4/block.go +++ b/vendor/github.com/pierrec/lz4/block.go @@ -40,7 +40,10 @@ func UncompressBlock(src, dst []byte) (int, error) { // The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. 
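// A hypothetical caller (illustration only) supplies both the destination and
// the hash table, sized to satisfy the check below:
//
//	ht := make([]int, 64<<10) // htSize entries
//	dst := make([]byte, lz4.CompressBlockBound(len(src)))
//	n, err := lz4.CompressBlock(src, dst, ht)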
-func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { + if len(hashTable) < htSize { + return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize) + } defer recoverBlock(&err) // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. @@ -51,16 +54,13 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { if sn <= 0 || dn == 0 { return 0, nil } - if len(hashTable) < htSize { - return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize) - } // Prove to the compiler the table has at least htSize elements. // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. hashTable = hashTable[:htSize] // si: Current position of the search. // anchor: Position of the current literals. - var si, anchor int + var si, di, anchor int // Fast scan strategy: the hash table only stores the last 4 bytes sequences. for si < sn { @@ -124,7 +124,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { si, mLen = si+mLen, si+minMatch // Find the longest match by looking by batches of 8 bytes. - for si < sn { + for si+8 < sn { x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) if x == 0 { si += 8 @@ -227,7 +227,7 @@ func blockHashHC(x uint32) uint32 { // The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. // // An error is returned if the destination buffer is too small. -func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { defer recoverBlock(&err) // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. @@ -239,7 +239,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { if sn <= 0 || dn == 0 { return 0, nil } - var si int + var si, di int // hashTable: stores the last position found for a given hash // chainTable: stores previous positions for a given hash diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go index 002519f3e7f..919888edf7d 100644 --- a/vendor/github.com/pierrec/lz4/decode_other.go +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -19,7 +19,7 @@ func decodeBlock(dst, src []byte) (ret int) { // Literals. if lLen := b >> 4; lLen > 0 { switch { - case lLen < 0xF && di+18 < len(dst) && si+16 < len(src): + case lLen < 0xF && si+16 < len(src): // Shortcut 1 // if we have enough room in src and dst, and the literals length // is small enough (0..14) then copy all 16 bytes, even if not all @@ -34,7 +34,13 @@ func decodeBlock(dst, src []byte) (ret int) { mLen += 4 if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { i := di - offset - copy(dst[di:], dst[i:i+18]) + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. 
+				end = len(dst)
+			}
+			copy(dst[di:], dst[i:end])
 			si += 2
 			di += mLen
 			continue
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
index 3e278945ec8..1c45d1813ce 100644
--- a/vendor/github.com/pierrec/lz4/errors.go
+++ b/vendor/github.com/pierrec/lz4/errors.go
@@ -15,6 +15,8 @@ var (
 	ErrInvalid = errors.New("lz4: bad magic number")
 	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
 	ErrBlockDependency = errors.New("lz4: block dependency not supported")
+	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
+	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
 )

 func recoverBlock(e *error) {
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
index cdbf9611f48..29864d8fd57 100644
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -10,6 +10,10 @@
 //
 package lz4

+import "math/bits"
+
+import "sync"
+
 const (
 	// Extension is the LZ4 frame file name extension
 	Extension = ".lz4"
@@ -34,22 +38,61 @@ const (
 	hashLog = 16
 	htSize  = 1 << hashLog

-	mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
 )

 // map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
 const (
-	blockSize64K  = 64 << 10
-	blockSize256K = 256 << 10
-	blockSize1M   = 1 << 20
-	blockSize4M   = 4 << 20
+	blockSize64K = 1 << (16 + 2*iota)
+	blockSize256K
+	blockSize1M
+	blockSize4M
 )

 var (
-	bsMapID    = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
-	bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
+	// Keep a pool of buffers for each valid block size.
+	bsMapValue = [...]*sync.Pool{
+		newBufferPool(2 * blockSize64K),
+		newBufferPool(2 * blockSize256K),
+		newBufferPool(2 * blockSize1M),
+		newBufferPool(2 * blockSize4M),
+	}
 )

+// newBufferPool returns a pool for buffers of the given size.
+func newBufferPool(size int) *sync.Pool {
+	return &sync.Pool{
+		New: func() interface{} {
+			return make([]byte, size)
+		},
+	}
+}
+
+// getBuffer retrieves a buffer from the pool for the given block size.
+func getBuffer(size int) []byte {
+	idx := blockSizeValueToIndex(size) - 4
+	return bsMapValue[idx].Get().([]byte)
+}
+
+// putBuffer returns a buffer to its pool.
+func putBuffer(size int, buf []byte) {
+	if cap(buf) > 0 {
+		idx := blockSizeValueToIndex(size) - 4
+		bsMapValue[idx].Put(buf[:cap(buf)])
+	}
+}
+func blockSizeIndexToValue(i byte) int {
+	return 1 << (16 + 2*uint(i))
+}
+func isValidBlockSize(size int) bool {
+	const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
+
+	return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
+}
+func blockSizeValueToIndex(size int) byte {
+	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
+}
+
 // Header describes the various flags that can be set on a Writer or obtained from a Reader.
 // The default values match those of the LZ4 frame format definition
 // (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
@@ -64,3 +107,7 @@ type Header struct {
 	CompressionLevel int  // Compression level (higher is better, use 0 for fastest compression).
 	done             bool // Header processed flag (Read or Write and checked).
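The iota/bit-twiddling helpers above replace the two lookup maps. A standalone sketch (the helpers are re-declared locally, since the originals are unexported) showing that frame block-size IDs 4..7 and their byte sizes round-trip through the arithmetic:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Mirrors blockSizeIndexToValue / blockSizeValueToIndex above.
	indexToValue := func(i byte) int { return 1 << (16 + 2*uint(i)) }
	valueToIndex := func(size int) byte {
		return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
	}

	for id := byte(4); id <= 7; id++ {
		size := indexToValue(id - 4) // 64KB, 256KB, 1MB, 4MB
		fmt.Printf("id %d -> %8d bytes -> id %d\n", id, size, valueToIndex(size))
	}
}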
} + +func (h *Header) Reset() { + h.done = false +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go index 90e8efe2eb0..87dd72bd0db 100644 --- a/vendor/github.com/pierrec/lz4/reader.go +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -25,6 +25,8 @@ type Reader struct { data []byte // Uncompressed data. idx int // Index of unread bytes into data. checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest } // NewReader returns a new LZ4 frame decoder. @@ -86,10 +88,10 @@ func (z *Reader) readHeader(first bool) error { z.NoChecksum = b>>2&1 == 0 bmsID := buf[1] >> 4 & 0x7 - bSize, ok := bsMapID[bmsID] - if !ok { + if bmsID < 4 || bmsID > 7 { return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) } + bSize := blockSizeIndexToValue(bmsID - 4) z.BlockMaxSize = bSize // Allocate the compressed/uncompressed buffers. @@ -275,8 +277,20 @@ func (z *Reader) Read(buf []byte) (int, error) { z.idx = 0 } + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + n := copy(buf, z.data[z.idx:]) z.idx += n + z.dpos += int64(n) if debugFlag { debug("copied %d bytes to input", n) } @@ -284,6 +298,20 @@ func (z *Reader) Read(buf []byte) (int, error) { return n, nil } +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + // Reset discards the Reader's state and makes it equivalent to the // result of its original state from NewReader, but reading from r instead. // This permits reusing a Reader rather than allocating a new one. diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go index 804a68cc258..324f1386b8a 100644 --- a/vendor/github.com/pierrec/lz4/writer.go +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -3,11 +3,18 @@ package lz4 import ( "encoding/binary" "fmt" - "io" - "github.com/pierrec/lz4/internal/xxh32" + "io" + "runtime" ) +// zResult contains the results of compressing a block. +type zResult struct { + size uint32 // Block header + data []byte // Compressed data + checksum uint32 // Data checksum +} + // Writer implements the LZ4 frame encoder. type Writer struct { Header @@ -18,10 +25,13 @@ type Writer struct { buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes dst io.Writer // Destination. checksum xxh32.XXHZero // Frame checksum. - zdata []byte // Compressed data. - data []byte // Data to be compressed. + data []byte // Data to be compressed + buffer for compressed data. idx int // Index into data. hashtable [winSize]int // Hash table used in CompressBlock(). + + // For concurrency. + c chan chan zResult // Channel for block compression goroutines and writer goroutine. + err error // Any error encountered while writing to the underlying destination. 
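A hedged sketch of the forward-only Seek added above, used to skip decompressed output without copying it out (the file name is a placeholder):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	f, err := os.Open("data.lz4") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// Only io.SeekCurrent with a non-negative offset is supported;
	// anything else returns ErrUnsupportedSeek.
	if _, err := zr.Seek(4096, io.SeekCurrent); err != nil {
		panic(err)
	}
	// Read may return (0, nil) while the skip is being consumed, so
	// use io.ReadFull, which loops until the buffer is filled.
	buf := make([]byte, 512)
	n, err := io.ReadFull(zr, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		panic(err)
	}
	fmt.Printf("read %d bytes after skipping 4096\n", n)
}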
 }

 // NewWriter returns a new LZ4 frame encoder.
@@ -29,28 +39,92 @@ type Writer struct {
 // The supplied Header is checked at the first Write.
 // It is ok to change it before the first Write but then not until a Reset() is performed.
 func NewWriter(dst io.Writer) *Writer {
-	return &Writer{dst: dst}
+	z := new(Writer)
+	z.Reset(dst)
+	return z
+}
+
+// WithConcurrency sets the number of concurrent goroutines used for compression.
+// A negative value sets the concurrency to GOMAXPROCS.
+func (z *Writer) WithConcurrency(n int) *Writer {
+	switch {
+	case n == 0 || n == 1:
+		z.c = nil
+		return z
+	case n < 0:
+		n = runtime.GOMAXPROCS(0)
+	}
+	z.c = make(chan chan zResult, n)
+	// Writer goroutine managing concurrent block compression goroutines.
+	go func() {
+		// Process next block compression item.
+		for c := range z.c {
+			// Read the next compressed block result.
+			// Waiting here ensures that the blocks are output in the order they were sent.
+			// The incoming channel is always closed as it indicates to the caller that
+			// the block has been processed.
+			res := <-c
+			n := len(res.data)
+			if n == 0 {
+				// Notify the block compression routine that we are done with its result.
+				// This is used when a sentinel block is sent to terminate the compression.
+				close(c)
+				return
+			}
+			// Write the block.
+			if err := z.writeUint32(res.size); err != nil && z.err == nil {
+				z.err = err
+			}
+			if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
+				z.err = err
+			}
+			if z.BlockChecksum {
+				if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
+					z.err = err
+				}
+			}
+			if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
+				// It is now safe to release the buffer as no longer in use by any goroutine.
+				putBuffer(cap(res.data), res.data)
+			}
+			if h := z.OnBlockDone; h != nil {
+				h(n)
+			}
+			close(c)
+		}
+	}()
+	return z
+}
+
+// newBuffers instantiates a fresh buffer whose size matches the one in Header.
+// The pooled buffer is twice the block size: the first half holds the data to
+// be compressed, the second half receives the compressed output.
+func (z *Writer) newBuffers() {
+	bSize := z.Header.BlockMaxSize
+	buf := getBuffer(bSize)
+	z.data = buf[:bSize] // Uncompressed buffer is the first half.
+}
+
+// freeBuffers puts the writer's buffers back to the pool.
+func (z *Writer) freeBuffers() {
+	// Put the buffer back into the pool, if any.
+	putBuffer(z.Header.BlockMaxSize, z.data)
+	z.data = nil
 }

 // writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
 func (z *Writer) writeHeader() error {
 	// Default to 4Mb if BlockMaxSize is not set.
 	if z.Header.BlockMaxSize == 0 {
-		z.Header.BlockMaxSize = bsMapID[7]
+		z.Header.BlockMaxSize = blockSize4M
 	}
 	// The only option that needs to be validated.
 	bSize := z.Header.BlockMaxSize
-	bSizeID, ok := bsMapValue[bSize]
-	if !ok {
+	if !isValidBlockSize(z.Header.BlockMaxSize) {
 		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
 	}
 	// Allocate the compressed/uncompressed buffers.
 	// The compressed buffer cannot exceed the uncompressed one.
-	if n := 2 * bSize; cap(z.zdata) < n {
-		z.zdata = make([]byte, n, n)
-	}
-	z.data = z.zdata[:bSize]
-	z.zdata = z.zdata[:cap(z.zdata)][bSize:]
+	z.newBuffers()
 	z.idx = 0

 	// Size is optional.
@@ -70,7 +144,7 @@ func (z *Writer) writeHeader() error {
 		flg |= 1 << 2
 	}
 	buf[4] = flg
-	buf[5] = bSizeID << 4
+	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4

 	// Current buffer size: magic(4) + flags(1) + block max size (1).
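Usage-wise, the concurrency plumbing above stays opt-in. A minimal sketch (paths are placeholders); blocks are compressed in parallel but written out in submission order by the goroutine started in WithConcurrency:

package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	in, err := os.Open("input.bin") // placeholder path
	if err != nil {
		panic(err)
	}
	defer in.Close()

	out, err := os.Create("output.lz4") // placeholder path
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// -1 selects GOMAXPROCS workers; 0 or 1 keeps the serial path.
	zw := lz4.NewWriter(out).WithConcurrency(-1)
	if _, err := io.Copy(zw, in); err != nil {
		panic(err)
	}
	// Close flushes the last block and terminates the writer goroutine
	// via the sentinel-block handshake in close().
	if err := zw.Close(); err != nil {
		panic(err)
	}
}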
n := 6 @@ -150,28 +224,34 @@ func (z *Writer) Write(buf []byte) (int, error) { // compressBlock compresses a block. func (z *Writer) compressBlock(data []byte) error { if !z.NoChecksum { - z.checksum.Write(data) + _, _ = z.checksum.Write(data) } + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + go writerCompressBlock(c, z.Header, data) + return nil + } + + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] // The compressed block size cannot exceed the input's. var zn int - var err error if level := z.Header.CompressionLevel; level != 0 { - zn, err = CompressBlockHC(data, z.zdata, level) + zn, _ = CompressBlockHC(data, zdata, level) } else { - zn, err = CompressBlock(data, z.zdata, z.hashtable[:]) + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) } - var zdata []byte var bLen uint32 if debugFlag { debug("block compression %d => %d", len(data), zn) } - if err == nil && zn > 0 && zn < len(data) { + if zn > 0 && zn < len(data) { // Compressible and compressed size smaller than uncompressed: ok! bLen = uint32(zn) - zdata = z.zdata[:zn] + zdata = zdata[:zn] } else { // Uncompressed block. bLen = uint32(len(data)) | compressedBlockFlag @@ -218,13 +298,35 @@ func (z *Writer) Flush() error { return nil } - if err := z.compressBlock(z.data[:z.idx]); err != nil { - return err - } + data := z.data[:z.idx] z.idx = 0 + if z.c == nil { + return z.compressBlock(data) + } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) return nil } +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. + <-c + // At this point the main goroutine has shut down or is about to return. + z.c = nil + return z.err +} + // Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. func (z *Writer) Close() error { if !z.Header.done { @@ -235,6 +337,10 @@ func (z *Writer) Close() error { if err := z.Flush(); err != nil { return err } + if err := z.close(); err != nil { + return err + } + z.freeBuffers() if debugFlag { debug("writing last empty block") @@ -256,12 +362,15 @@ func (z *Writer) Close() error { // initial state from NewWriter, but instead writing to w. // No access to the underlying io.Writer is performed. func (z *Writer) Reset(w io.Writer) { - z.Header = Header{} + n := cap(z.c) + _ = z.close() + z.freeBuffers() + z.Header.Reset() z.dst = w z.checksum.Reset() - z.zdata = z.zdata[:0] - z.data = z.data[:0] z.idx = 0 + z.err = nil + z.WithConcurrency(n) } // writeUint32 writes a uint32 to the underlying writer. @@ -271,3 +380,29 @@ func (z *Writer) writeUint32(x uint32) error { _, err := z.dst.Write(buf) return err } + +// writerCompressBlock compresses data into a pooled buffer and writes its result +// out to the input channel. +func writerCompressBlock(c chan zResult, header Header, data []byte) { + zdata := getBuffer(header.BlockMaxSize) + // The compressed block size cannot exceed the input's. 
+ var zn int + if level := header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + var hashTable [winSize]int + zn, _ = CompressBlock(data, zdata, hashTable[:]) + } + var res zResult + if zn > 0 && zn < len(data) { + res.size = uint32(zn) + res.data = zdata[:zn] + } else { + res.size = uint32(len(data)) | compressedBlockFlag + res.data = data + } + if header.BlockChecksum { + res.checksum = xxh32.ChecksumZero(res.data) + } + c <- res +} diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml index 117763e6509..aead0765480 100644 --- a/vendor/github.com/rcrowley/go-metrics/.travis.yml +++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml @@ -1,13 +1,16 @@ language: go go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 + - "1.3" + - "1.4" + - "1.5" + - "1.6" + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" script: - ./validate.sh diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md index b7356b5fc1f..27ddfee8b89 100644 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -157,6 +157,7 @@ Publishing Metrics Clients are available for the following destinations: +* AppOptics - https://github.com/ysamlan/go-metrics-appoptics * Librato - https://github.com/mihasya/go-metrics-librato * Graphite - https://github.com/cyberdelia/go-metrics-graphite * InfluxDB - https://github.com/vrischmann/go-metrics-influxdb @@ -166,3 +167,5 @@ Clients are available for the following destinations: * SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx * Honeycomb - https://github.com/getspine/go-metrics-honeycomb * Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront +* Open-Falcon - https://github.com/g4zhuj/go-metrics-falcon +* AWS CloudWatch - [https://github.com/savaki/cloudmetrics](https://github.com/savaki/cloudmetrics) diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go index 043ccefab61..179e5aae729 100644 --- a/vendor/github.com/rcrowley/go-metrics/debug.go +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -2,6 +2,7 @@ package metrics import ( "runtime/debug" + "sync" "time" ) @@ -16,7 +17,8 @@ var ( } ReadGCStats Timer } - gcStats debug.GCStats + gcStats debug.GCStats + registerDebugMetricsOnce = sync.Once{} ) // Capture new values for the Go garbage collector statistics exported in @@ -54,19 +56,21 @@ func CaptureDebugGCStatsOnce(r Registry) { // debug.GCStats. The metrics are named by their fully-qualified Go symbols, // i.e. debug.GCStats.PauseTotal. 
func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() + registerDebugMetricsOnce.Do(func() { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) + }) } // Allocate an initial slice for gcStats.Pause to avoid allocations during diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go index f8074c04576..2614a0a33eb 100644 --- a/vendor/github.com/rcrowley/go-metrics/log.go +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -8,17 +8,37 @@ type Logger interface { Printf(format string, v ...interface{}) } +// Log outputs each metric in the given registry periodically using the given logger. func Log(r Registry, freq time.Duration, l Logger) { LogScaled(r, freq, time.Nanosecond, l) } -// Output each metric in the given registry periodically using the given +// LogOnCue outputs each metric in the given registry on demand through the channel +// using the given logger +func LogOnCue(r Registry, ch chan interface{}, l Logger) { + LogScaledOnCue(r, ch, time.Nanosecond, l) +} + +// LogScaled outputs each metric in the given registry periodically using the given // logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + ch := make(chan interface{}) + go func(channel chan interface{}) { + for _ = range time.Tick(freq) { + channel <- struct{}{} + } + }(ch) + LogScaledOnCue(r, ch, scale, l) +} + +// LogScaledOnCue outputs each metric in the given registry on demand through the channel +// using the given logger. Print timings in `scale` units (eg time.Millisecond) rather +// than nanos. 
+func LogScaledOnCue(r Registry, ch chan interface{}, scale time.Duration, l Logger) { du := float64(scale) duSuffix := scale.String()[1:] - for _ = range time.Tick(freq) { + for _ = range ch { r.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go index b3bab64e15b..a8e67228a45 100644 --- a/vendor/github.com/rcrowley/go-metrics/registry.go +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -64,8 +64,10 @@ func NewRegistry() Registry { // Call the given function for each registered metric. func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) + metrics := r.registered() + for i := range metrics { + kv := &metrics[i] + f(kv.name, kv.value) } } @@ -211,12 +213,20 @@ func (r *StandardRegistry) register(name string, i interface{}) error { return nil } -func (r *StandardRegistry) registered() map[string]interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - metrics := make(map[string]interface{}, len(r.metrics)) +type metricKV struct { + name string + value interface{} +} + +func (r *StandardRegistry) registered() []metricKV { + r.mutex.RLock() + defer r.mutex.RUnlock() + metrics := make([]metricKV, 0, len(r.metrics)) for name, i := range r.metrics { - metrics[name] = i + metrics = append(metrics, metricKV{ + name: name, + value: i, + }) } return metrics } diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go index 11c6b785a0f..4047ab3d373 100644 --- a/vendor/github.com/rcrowley/go-metrics/runtime.go +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -3,6 +3,7 @@ package metrics import ( "runtime" "runtime/pprof" + "sync" "time" ) @@ -49,7 +50,8 @@ var ( numGC uint32 numCgoCalls int64 - threadCreateProfile = pprof.Lookup("threadcreate") + threadCreateProfile = pprof.Lookup("threadcreate") + registerRuntimeMetricsOnce = sync.Once{} ) // Capture new values for the Go runtime statistics exported in @@ -146,67 +148,69 @@ func CaptureRuntimeMemStatsOnce(r Registry) { // specifically runtime.MemStats. The runtimeMetrics are named by their // fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
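For orientation, LogScaledOnCue turns the old fixed-ticker loggers into consumers of a caller-driven cue channel. A hedged sketch of logging the default registry on demand (the metric name and sleep are illustrative):

package main

import (
	"log"
	"os"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	c := metrics.GetOrRegisterCounter("requests", nil) // nil = DefaultRegistry
	c.Inc(42)

	// Each value sent on the cue channel triggers one dump of the
	// registry; LogOnCue blocks, so run it on its own goroutine.
	cue := make(chan interface{})
	go metrics.LogOnCue(metrics.DefaultRegistry, cue,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	cue <- struct{}{} // log once, on demand
	time.Sleep(100 * time.Millisecond)
}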
func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.NumThread = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() + registerRuntimeMetricsOnce.Do(func() { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", 
runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) - r.Register("runtime.NumThread", runtimeMetrics.NumThread) - r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + 
r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) + }) } diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go index 8ad3e55ee76..93ff1dbaf03 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go @@ -60,7 +60,7 @@ func (cl *Client) TGSExchange(tgsReq messages.TGSReq, kdcRealm string, tgt messa return tgsReq, tgsRep, err } } - tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal) + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), realm, cl.Config, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, tgsReq.ReqBody.SName, tgsReq.Renewal) if err != nil { return tgsReq, tgsRep, err } diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go index 6e4c83c2c42..cc931748aea 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go @@ -188,6 +188,18 @@ func (cl *Client) Login() error { return nil } +// AffirmLogin will only perform an AS exchange with the KDC if the client does not already have a TGT. +func (cl *Client) AffirmLogin() error { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + return nil +} + // realmLogin obtains or renews a TGT and establishes a session for the realm specified. 
func (cl *Client) realmLogin(realm string) error { if realm == cl.Credentials.Domain() { diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go index e6d41805902..da838edcdbb 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go @@ -44,7 +44,7 @@ func (cl *Client) ChangePasswd(newPasswd string) (bool, error) { return false, err } if r.ResultCode != KRB5_KPASSWD_SUCCESS { - return false, fmt.Errorf("error response from kdamin: %s", r.Result) + return false, fmt.Errorf("error response from kadmin: code: %d; result: %s; krberror: %v", r.ResultCode, r.Result, r.KRBError) } cl.Credentials.WithPassword(newPasswd) return true, nil diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go index 516c823140e..12c04c45262 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go @@ -38,7 +38,7 @@ func (s *Settings) DisablePAFXFAST() bool { // s := NewSettings(AssumePreAuthentication(true)) func AssumePreAuthentication(b bool) func(*Settings) { return func(s *Settings) { - s.disablePAFXFast = b + s.assumePreAuthentication = b } } diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go index a58c234178d..a67989f9334 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go @@ -18,37 +18,41 @@ func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) { kdcs := make(map[int]string) var count int - // Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf. - if c.LibDefaults.DNSLookupKDC { - proto := "udp" - if tcp { - proto = "tcp" - } - c, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm) - if err != nil { - return count, kdcs, err - } - if len(addrs) < 1 { - return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm) - } - count = c - for k, v := range addrs { - kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) - } - } else { - // Get the KDCs from the krb5.conf an order them randomly for preference. - var ks []string - for _, r := range c.Realms { - if r.Realm == realm { - ks = r.KDC - break - } - } - count = len(ks) - if count < 1 { - return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm) + // Get the KDCs from the krb5.conf. + var ks []string + for _, r := range c.Realms { + if r.Realm != realm { + continue } + ks = r.KDC + } + count = len(ks) + + if count > 0 { + // Order the kdcs randomly for preference. kdcs = randServOrder(ks) + return count, kdcs, nil + } + + if !c.LibDefaults.DNSLookupKDC { + return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm) + } + + // Use DNS to resolve kerberos SRV records. 
+ proto := "udp" + if tcp { + proto = "tcp" + } + index, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm) + if err != nil { + return count, kdcs, err + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm) + } + count = index + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) } return count, kdcs, nil } diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go index 62acab7a154..beec0664720 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go @@ -288,6 +288,11 @@ func (c *Credentials) Expired() bool { return false } +// ValidUntil returns the credential's valid until date +func (c *Credentials) ValidUntil() time.Time { + return c.validUntil +} + // Attributes returns the Credentials' attributes map. func (c *Credentials) Attributes() map[string]interface{} { return c.attributes diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go index 0c7fc382034..22c02044b31 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go @@ -141,6 +141,10 @@ func (kt *Keytab) Write(w io.Writer) (int, error) { // Unmarshal byte slice of Keytab data into Keytab type. func (kt *Keytab) Unmarshal(b []byte) error { + if len(b) < 2 { + return fmt.Errorf("byte array is less than 2 bytes: %d", len(b)) + } + //The first byte of the file always has the value 5 if b[0] != keytabFirstByte { return errors.New("invalid keytab data. First byte does not equal 5") @@ -165,7 +169,10 @@ func (kt *Keytab) Unmarshal(b []byte) error { */ // n tracks position in the byte array n := 2 - l := readInt32(b, &n, &endian) + l, err := readInt32(b, &n, &endian) + if err != nil { + return err + } for l != 0 { if l < 0 { //Zero padded so skip over @@ -173,23 +180,52 @@ func (kt *Keytab) Unmarshal(b []byte) error { n = n + int(l) } else { //fmt.Printf("Bytes for entry: %v\n", b[n:n+int(l)]) + if n < 0 { + return fmt.Errorf("%d can't be less than zero", n) + } + if n+int(l) > len(b) { + return fmt.Errorf("%s's length is less than %d", b, n+int(l)) + } eb := b[n : n+int(l)] n = n + int(l) ke := newKeytabEntry() // p keeps track as to where we are in the byte stream var p int + var err error parsePrincipal(eb, &p, kt, &ke, &endian) - ke.Timestamp = readTimestamp(eb, &p, &endian) - ke.KVNO8 = uint8(readInt8(eb, &p, &endian)) - ke.Key.KeyType = int32(readInt16(eb, &p, &endian)) - kl := int(readInt16(eb, &p, &endian)) - ke.Key.KeyValue = readBytes(eb, &p, kl, &endian) + ke.Timestamp, err = readTimestamp(eb, &p, &endian) + if err != nil { + return err + } + rei8, err := readInt8(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO8 = uint8(rei8) + rei16, err := readInt16(eb, &p, &endian) + if err != nil { + return err + } + ke.Key.KeyType = int32(rei16) + rei16, err = readInt16(eb, &p, &endian) + if err != nil { + return err + } + kl := int(rei16) + ke.Key.KeyValue, err = readBytes(eb, &p, kl, &endian) + if err != nil { + return err + } //The 32-bit key version overrides the 8-bit key version. // To determine if it is present, the implementation must check that at least 4 bytes remain in the record after the other fields are read, // and that the value of the 32-bit integer contained in those bytes is non-zero. 
if len(eb)-p >= 4 { // The 32-bit key may be present - ke.KVNO = uint32(readInt32(eb, &p, &endian)) + ri32, err := readInt32(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO = uint32(ri32) } if ke.KVNO == 0 { // Handles if the value from the last 4 bytes was zero and also if there are not the 4 bytes present. Makes sense to put the same value here as KVNO8 @@ -199,11 +235,15 @@ func (kt *Keytab) Unmarshal(b []byte) error { kt.Entries = append(kt.Entries, ke) } // Check if there are still 4 bytes left to read - if n > len(b) || len(b[n:]) < 4 { + // Also check that n is greater than zero + if n < 0 || n > len(b) || len(b[n:]) < 4 { break } // Read the size of the next entry - l = readInt32(b, &n, &endian) + l, err = readInt32(b, &n, &endian) + if err != nil { + return err + } } return nil } @@ -249,20 +289,41 @@ func (e entry) marshal(v int) ([]byte, error) { // Parse the Keytab bytes of a principal into a Keytab entry's principal. func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error { - ke.Principal.NumComponents = readInt16(b, p, e) + var err error + ke.Principal.NumComponents, err = readInt16(b, p, e) + if err != nil { + return err + } if kt.version == 1 { //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 ke.Principal.NumComponents-- } - lenRealm := readInt16(b, p, e) - ke.Principal.Realm = string(readBytes(b, p, int(lenRealm), e)) + lenRealm, err := readInt16(b, p, e) + if err != nil { + return err + } + realmB, err := readBytes(b, p, int(lenRealm), e) + if err != nil { + return err + } + ke.Principal.Realm = string(realmB) for i := 0; i < int(ke.Principal.NumComponents); i++ { - l := readInt16(b, p, e) - ke.Principal.Components = append(ke.Principal.Components, string(readBytes(b, p, int(l), e))) + l, err := readInt16(b, p, e) + if err != nil { + return err + } + compB, err := readBytes(b, p, int(l), e) + if err != nil { + return err + } + ke.Principal.Components = append(ke.Principal.Components, string(compB)) } if kt.version != 1 { //Name Type is omitted in version 1 - ke.Principal.NameType = readInt32(b, p, e) + ke.Principal.NameType, err = readInt32(b, p, e) + if err != nil { + return err + } } return nil } @@ -315,12 +376,23 @@ func marshalString(s string, v int) ([]byte, error) { } // Read bytes representing a timestamp. -func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time { - return time.Unix(int64(readInt32(b, p, e)), 0) +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) (time.Time, error) { + i32, err := readInt32(b, p, e) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(i32), 0), nil } // Read bytes representing an eight bit integer. -func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 1) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+1) + } buf := bytes.NewBuffer(b[*p : *p+1]) binary.Read(buf, *e, &i) *p++ @@ -328,7 +400,15 @@ func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { } // Read bytes representing a sixteen bit integer. 
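All of the keytab readers above now validate offsets and lengths before slicing, so a truncated or corrupt file surfaces as an error instead of a panic. A hedged round-trip sketch (the path is a placeholder):

package main

import (
	"io/ioutil"
	"log"

	"gopkg.in/jcmturner/gokrb5.v7/keytab"
)

func main() {
	b, err := ioutil.ReadFile("/etc/beats.keytab") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	kt := keytab.New()
	// With the bounds checks above, malformed input fails here rather
	// than panicking inside readInt16/readInt32/readBytes.
	if err := kt.Unmarshal(b); err != nil {
		log.Fatalf("corrupt keytab: %v", err)
	}
	log.Printf("parsed %d keytab entries", len(kt.Entries))
}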
-func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { +func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 2) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+2) + } + buf := bytes.NewBuffer(b[*p : *p+2]) binary.Read(buf, *e, &i) *p += 2 @@ -336,19 +416,36 @@ func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { } // Read bytes representing a thirty two bit integer. -func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) { +func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 4) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+4) + } + buf := bytes.NewBuffer(b[*p : *p+4]) binary.Read(buf, *e, &i) *p += 4 return } -func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte { - buf := bytes.NewBuffer(b[*p : *p+s]) +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) ([]byte, error) { + if s < 0 { + return nil, fmt.Errorf("%d cannot be less than zero", s) + } + i := *p + s + if i > len(b) { + return nil, fmt.Errorf("%s's length is greater than %d", b, i) + } + buf := bytes.NewBuffer(b[*p:i]) r := make([]byte, s) - binary.Read(buf, *e, &r) + if err := binary.Read(buf, *e, &r); err != nil { + return nil, err + } *p += s - return r + return r, nil } func isNativeEndianLittle() bool { diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go index 8d82df2af39..73bd64d154e 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go @@ -70,7 +70,13 @@ func (m *KRB5Token) Unmarshal(b []byte) error { if err != nil { return fmt.Errorf("error unmarshalling KRB5Token OID: %v", err) } + if !oid.Equal(gssapi.OID(gssapi.OIDKRB5)) { + return fmt.Errorf("error unmarshalling KRB5Token, OID is %s not %s", oid.String(), gssapi.OID(gssapi.OIDKRB5).String()) + } m.OID = oid + if len(r) < 2 { + return fmt.Errorf("krb5token too short") + } m.tokID = r[0:2] switch hex.EncodeToString(m.tokID) { case TOK_ID_KRB_AP_REQ: diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go index 4a80f3595e0..34d305f3758 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go @@ -169,10 +169,6 @@ func (n *NegTokenInit) Verify() (bool, gssapi.Status) { return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "MechToken is not a KRB5 token as expected"} } } - // RFC4178 states that the initial negotiation message can optionally contain the initial mechanism token for the preferred mechanism of the client. 
- if !mt.OID.Equal(n.MechTypes[0]) { - return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "OID of MechToken does not match the first in the MechTypeList"} - } // Verify the mechtoken return n.mechToken.Verify() } diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go index f82947c7e13..b368d641922 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go @@ -44,7 +44,7 @@ func (s *SPNEGO) OID() asn1.ObjectIdentifier { // AcquireCred is the GSS-API method to acquire a client credential via Kerberos for SPNEGO. func (s *SPNEGO) AcquireCred() error { - return s.client.Login() + return s.client.AffirmLogin() } // InitSecContext is the GSS-API method for the client to a generate a context token to the service via Kerberos. @@ -132,6 +132,10 @@ func (s *SPNEGOToken) Marshal() ([]byte, error) { func (s *SPNEGOToken) Unmarshal(b []byte) error { var r []byte var err error + // We need some data in the array + if len(b) < 1 { + return fmt.Errorf("provided byte array is empty") + } if b[0] != byte(161) { // Not a NegTokenResp/Targ could be a NegTokenInit var oid asn1.ObjectIdentifier diff --git a/vendor/modules.txt b/vendor/modules.txt index 2c7c8df5730..208d8566ddb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -93,7 +93,7 @@ github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid # github.com/Microsoft/hcsshim v0.8.7 github.com/Microsoft/hcsshim/osversion -# github.com/Shopify/sarama v0.0.0-00010101000000-000000000000 => github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 +# github.com/Shopify/sarama v0.0.0-00010101000000-000000000000 => github.com/elastic/sarama v1.24.1-elastic.0.20200519143807-cbc80333a91e github.com/Shopify/sarama # github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6 github.com/StackExchange/wmi @@ -404,7 +404,7 @@ github.com/dop251/goja_nodejs/require github.com/dop251/goja_nodejs/util # github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 github.com/dustin/go-humanize -# github.com/eapache/go-resiliency v1.1.0 +# github.com/eapache/go-resiliency v1.2.0 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 github.com/eapache/go-xerial-snappy @@ -593,7 +593,7 @@ github.com/h2non/filetype/types github.com/hashicorp/errwrap # github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-uuid v1.0.1 +# github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.0.0 github.com/hashicorp/go-version @@ -629,7 +629,7 @@ github.com/json-iterator/go github.com/jstemmer/go-junit-report github.com/jstemmer/go-junit-report/formatter github.com/jstemmer/go-junit-report/parser -# github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd +# github.com/klauspost/compress v1.9.8 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -694,7 +694,7 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v1.0.0-rc9 github.com/opencontainers/runc/libcontainer/system github.com/opencontainers/runc/libcontainer/user -# github.com/pierrec/lz4 v2.2.6+incompatible +# github.com/pierrec/lz4 v2.4.1+incompatible github.com/pierrec/lz4 github.com/pierrec/lz4/internal/xxh32 # github.com/pierrre/gotestcover v0.0.0-20160113212533-7b94f124d338 @@ -719,7 +719,7 @@ 
github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/prometheus/prometheus v2.5.0+incompatible github.com/prometheus/prometheus/prompb -# github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a +# github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 github.com/rcrowley/go-metrics # github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd github.com/reviewdog/errorformat @@ -1113,7 +1113,7 @@ gopkg.in/jcmturner/aescts.v1 gopkg.in/jcmturner/dnsutils.v1 # gopkg.in/jcmturner/goidentity.v3 v3.0.0 gopkg.in/jcmturner/goidentity.v3 -# gopkg.in/jcmturner/gokrb5.v7 v7.3.0 +# gopkg.in/jcmturner/gokrb5.v7 v7.5.0 gopkg.in/jcmturner/gokrb5.v7/asn1tools gopkg.in/jcmturner/gokrb5.v7/client gopkg.in/jcmturner/gokrb5.v7/config
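For readers tracing the dependency bumps: each "=>" line in vendor/modules.txt mirrors a replace directive in go.mod. The sarama pin above, for instance, corresponds to a stanza along these lines (reconstructed from the modules.txt entry, not quoted from this patch's go.mod hunk):

require github.com/Shopify/sarama v0.0.0-00010101000000-000000000000

replace github.com/Shopify/sarama => github.com/elastic/sarama v1.24.1-elastic.0.20200519143807-cbc80333a91e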