diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 7ddb46be7d4..5612e505c63 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -17,6 +17,7 @@ https://github.com/elastic/beats/compare/v6.2.3...master[Check the HEAD diff]
 - Rename beat.cpu.*.time metrics to beat.cpu.*.time.ms. {pull}6449[6449]
 - Mark `system.syslog.message` and `system.auth.message` as `text` instead of `keyword`. {pull}6589[6589]
 - Allow override of dynamic template `match_mapping_type` for fields with object_type. {pull}6691[6691]
+- Set default kafka version to 1.0.0 in kafka output. Older versions are still supported by configuring the `version` setting. {pull}7025[7025]
 
 *Auditbeat*
 
@@ -56,6 +57,7 @@ https://github.com/elastic/beats/compare/v6.2.3...master[Check the HEAD diff]
 - Ensure Kubernetes labels/annotations don't break mapping {pull}6490[6490]
 - Ensure that the dashboard zip files can't contain files outside of the kibana directory. {pull}6921[6921]
 - Fix map overwrite panics by cloning shared structs before doing the update. {pull}6947[6947]
+- Fix error if lz4 compression is used with the kafka output. {pull}7025[7025]
 
 *Auditbeat*
 
@@ -121,6 +123,7 @@ https://github.com/elastic/beats/compare/v6.2.3...master[Check the HEAD diff]
 - Add IP-addresses and MAC-addresses to add_host_metadata. {pull}6878[6878]
 - Add a default seccomp (secure computing) filter on Linux that prohibits execve, execveat, fork, and vfork syscalls. A custom policy can be configured. {issue}5213[5213]
+- Update Sarama to v1.16.0, adding better support for Kafka 0.11, 1.0, and 1.1. {pull}7025[7025]
 
 *Auditbeat*
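All three entries above come from the same change: the kafka output now assumes the 1.0.0 protocol unless `version` says otherwise, and lz4 works once the client advertises a protocol level that supports it. A minimal sketch of that interaction against the public sarama API — not part of this patch; the exact validation behavior is an assumption, only the types and fields used are taken from the vendored code below:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// lz4 framing is only valid for brokers speaking the 0.10+ protocol,
	// so the client has to be told which protocol version to assume.
	cfg.Producer.Compression = sarama.CompressionLZ4
	cfg.Version = sarama.V1_0_0_0 // the new beats default; was 0.8.2 before

	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid sarama config:", err)
	}
}
```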
diff --git a/NOTICE.txt b/NOTICE.txt
index 1f696fdfab0..a643b853b4e 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -2012,8 +2012,8 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 --------------------------------------------------------------------
 Dependency: github.com/Shopify/sarama
-Version: v1.12/enh/offset-replica-id
-Revision: c292021939f5aba53b3ffc2cb09c7aadb32a42df
+Version: v1.16.0/enh/offset-replica-id
+Revision: 32b4ad5c9537ed14e471779b76713ff65420db39
 License type (autodetected): MIT
 ./vendor/github.com/Shopify/sarama/LICENSE:
 --------------------------------------------------------------------
diff --git a/libbeat/outputs/kafka/config.go b/libbeat/outputs/kafka/config.go
index fab2d680ce1..1755c808ff7 100644
--- a/libbeat/outputs/kafka/config.go
+++ b/libbeat/outputs/kafka/config.go
@@ -6,8 +6,13 @@ import (
 	"strings"
 	"time"
 
+	"github.com/Shopify/sarama"
+
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/common/fmtstr"
+	"github.com/elastic/beats/libbeat/logp"
+	"github.com/elastic/beats/libbeat/monitoring"
+	"github.com/elastic/beats/libbeat/monitoring/adapter"
 	"github.com/elastic/beats/libbeat/outputs"
 	"github.com/elastic/beats/libbeat/outputs/codec"
 )
@@ -44,8 +49,17 @@ type metaRetryConfig struct {
 	Backoff time.Duration `config:"backoff" validate:"min=0"`
 }
 
-var (
-	defaultConfig = kafkaConfig{
+var compressionModes = map[string]sarama.CompressionCodec{
+	"none":   sarama.CompressionNone,
+	"no":     sarama.CompressionNone,
+	"off":    sarama.CompressionNone,
+	"gzip":   sarama.CompressionGZIP,
+	"lz4":    sarama.CompressionLZ4,
+	"snappy": sarama.CompressionSnappy,
+}
+
+func defaultConfig() kafkaConfig {
+	return kafkaConfig{
 		Hosts:   nil,
 		TLS:     nil,
 		Timeout: 30 * time.Second,
@@ -62,14 +76,14 @@
 		RequiredACKs:   nil, // use library default
 		BrokerTimeout:  10 * time.Second,
 		Compression:    "gzip",
-		Version:        "",
+		Version:        "1.0.0",
 		MaxRetries:     3,
 		ClientID:       "beats",
 		ChanBufferSize: 256,
 		Username:       "",
 		Password:       "",
 	}
-)
+}
 
 func (c *kafkaConfig) Validate() error {
 	if len(c.Hosts) == 0 {
@@ -90,3 +104,94 @@ func (c *kafkaConfig) Validate() error {
 
 	return nil
 }
+
+func newSaramaConfig(config *kafkaConfig) (*sarama.Config, error) {
+	partitioner, err := makePartitioner(config.Partition)
+	if err != nil {
+		return nil, err
+	}
+
+	k := sarama.NewConfig()
+
+	// configure network level properties
+	timeout := config.Timeout
+	k.Net.DialTimeout = timeout
+	k.Net.ReadTimeout = timeout
+	k.Net.WriteTimeout = timeout
+	k.Net.KeepAlive = config.KeepAlive
+	k.Producer.Timeout = config.BrokerTimeout
+
+	tls, err := outputs.LoadTLSConfig(config.TLS)
+	if err != nil {
+		return nil, err
+	}
+	if tls != nil {
+		k.Net.TLS.Enable = true
+		k.Net.TLS.Config = tls.BuildModuleConfig("")
+	}
+
+	if config.Username != "" {
+		k.Net.SASL.Enable = true
+		k.Net.SASL.User = config.Username
+		k.Net.SASL.Password = config.Password
+	}
+
+	// configure metadata update properties
+	k.Metadata.Retry.Max = config.Metadata.Retry.Max
+	k.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff
+	k.Metadata.RefreshFrequency = config.Metadata.RefreshFreq
+
+	// configure producer API properties
+	if config.MaxMessageBytes != nil {
+		k.Producer.MaxMessageBytes = *config.MaxMessageBytes
+	}
+	if config.RequiredACKs != nil {
+		k.Producer.RequiredAcks = sarama.RequiredAcks(*config.RequiredACKs)
+	}
+
+	compressionMode, ok := compressionModes[strings.ToLower(config.Compression)]
+	if !ok {
+		return nil, fmt.Errorf("Unknown compression mode: '%v'", config.Compression)
+	}
+	k.Producer.Compression = compressionMode
+
+	k.Producer.Return.Successes = true // enable return channel for signaling
+	k.Producer.Return.Errors = true
+
+	// have retries being handled by libbeat, disable retries in sarama library
+	retryMax := config.MaxRetries
+	if retryMax < 0 {
+		retryMax = 1000
+	}
+	k.Producer.Retry.Max = retryMax
+	// TODO: k.Producer.Retry.Backoff = ?
+
+	// configure per broker go channel buffering
+	k.ChannelBufferSize = config.ChanBufferSize
+
+	// configure client ID
+	k.ClientID = config.ClientID
+
+	version, ok := kafkaVersions[config.Version]
+	if !ok {
+		return nil, fmt.Errorf("Unknown/unsupported kafka version: %v", config.Version)
+	}
+	k.Version = version
+
+	k.MetricRegistry = kafkaMetricsRegistry()
+
+	k.Producer.Partitioner = partitioner
+	k.MetricRegistry = adapter.GetGoMetrics(
+		monitoring.Default,
+		"libbeat.outputs.kafka",
+		adapter.Rename("incoming-byte-rate", "bytes_read"),
+		adapter.Rename("outgoing-byte-rate", "bytes_write"),
+		adapter.GoMetricsNilify,
+	)
+
+	if err := k.Validate(); err != nil {
+		logp.Err("Invalid kafka configuration: %v", err)
+		return nil, err
+	}
+	return k, nil
+}
diff --git a/libbeat/outputs/kafka/config_test.go b/libbeat/outputs/kafka/config_test.go
new file mode 100644
index 00000000000..20ba69ac2da
--- /dev/null
+++ b/libbeat/outputs/kafka/config_test.go
@@ -0,0 +1,41 @@
+package kafka
+
+import (
+	"testing"
+
+	"github.com/elastic/beats/libbeat/common"
+)
+
+func TestConfigAcceptValid(t *testing.T) {
+	tests := map[string]common.MapStr{
+		"default config is valid": common.MapStr{},
+		"lz4 with 0.11": common.MapStr{
+			"compression": "lz4",
+			"version":     "0.11",
+		},
+		"lz4 with 1.0": common.MapStr{
+			"compression": "lz4",
+			"version":     "1.0.0",
+		},
+	}
+
+	for name, test := range tests {
+		test := test
+		t.Run(name, func(t *testing.T) {
+			c, err := common.NewConfigFrom(test)
+			if err != nil {
+				t.Fatalf("Can not create test configuration: %v", err)
+			}
+			c.SetString("hosts", 0, "localhost")
+
+			cfg := defaultConfig()
+			if err := c.Unpack(&cfg); err != nil {
+				t.Fatalf("Unpacking configuration failed: %v", err)
+			}
+
+			if _, err := newSaramaConfig(&cfg); err != nil {
+				t.Fatalf("Failure creating sarama config: %v", err)
+			}
+		})
+	}
+}
diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go
index 3756bad665b..4f135cd440f 100644
--- a/libbeat/outputs/kafka/kafka.go
+++ b/libbeat/outputs/kafka/kafka.go
@@ -2,8 +2,6 @@ package kafka
 
 import (
 	"errors"
-	"fmt"
-	"strings"
 	"sync"
 	"time"
 
@@ -13,8 +11,6 @@ import (
 	"github.com/elastic/beats/libbeat/beat"
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/logp"
-	"github.com/elastic/beats/libbeat/monitoring"
-	"github.com/elastic/beats/libbeat/monitoring/adapter"
 	"github.com/elastic/beats/libbeat/outputs"
 	"github.com/elastic/beats/libbeat/outputs/codec"
 	"github.com/elastic/beats/libbeat/outputs/outil"
@@ -45,54 +41,6 @@ var (
 	errNoHosts = errors.New("No hosts configured")
 )
 
-// TODO: remove me.
-// Compat version overwrite for missing versions in sarama
-// Public API is compatible between these versions.
-var (
-	v0_10_2_1 = sarama.V0_10_2_0
-	v0_11_0_0 = sarama.V0_10_2_0
-)
-
-var (
-	compressionModes = map[string]sarama.CompressionCodec{
-		"none":   sarama.CompressionNone,
-		"no":     sarama.CompressionNone,
-		"off":    sarama.CompressionNone,
-		"gzip":   sarama.CompressionGZIP,
-		"lz4":    sarama.CompressionLZ4,
-		"snappy": sarama.CompressionSnappy,
-	}
-
-	kafkaVersions = map[string]sarama.KafkaVersion{
-		"": sarama.V0_8_2_0,
-
-		"0.8.2.0": sarama.V0_8_2_0,
-		"0.8.2.1": sarama.V0_8_2_1,
-		"0.8.2.2": sarama.V0_8_2_2,
-		"0.8.2":   sarama.V0_8_2_2,
-		"0.8":     sarama.V0_8_2_2,
-
-		"0.9.0.0": sarama.V0_9_0_0,
-		"0.9.0.1": sarama.V0_9_0_1,
-		"0.9.0":   sarama.V0_9_0_1,
-		"0.9":     sarama.V0_9_0_1,
-
-		"0.10.0.0": sarama.V0_10_0_0,
-		"0.10.0.1": sarama.V0_10_0_1,
-		"0.10.0":   sarama.V0_10_0_1,
-		"0.10.1.0": sarama.V0_10_1_0,
-		"0.10.1":   sarama.V0_10_1_0,
-		"0.10.2.0": sarama.V0_10_2_0,
-		"0.10.2.1": v0_10_2_1,
-		"0.10.2":   v0_10_2_1,
-		"0.10":     v0_10_2_1,
-
-		"0.11.0.0": v0_11_0_0,
-		"0.11.0":   v0_11_0_0,
-		"0.11":     v0_11_0_0,
-	}
-)
-
 func init() {
 	sarama.Logger = kafkaLogger{}
 
@@ -118,7 +66,7 @@ func makeKafka(
 ) (outputs.Group, error) {
 	debugf("initialize kafka output")
 
-	config := defaultConfig
+	config := defaultConfig()
 	if err := cfg.Unpack(&config); err != nil {
 		return outputs.Fail(err)
 	}
@@ -133,7 +81,7 @@ func makeKafka(
 		return outputs.Fail(err)
 	}
 
-	libCfg, err := newKafkaConfig(&config)
+	libCfg, err := newSaramaConfig(&config)
 	if err != nil {
 		return outputs.Fail(err)
 	}
@@ -159,94 +107,3 @@ func makeKafka(
 	}
 	return outputs.Success(config.BulkMaxSize, retry, client)
 }
-
-func newKafkaConfig(config *kafkaConfig) (*sarama.Config, error) {
-	partitioner, err := makePartitioner(config.Partition)
-	if err != nil {
-		return nil, err
-	}
-
-	k := sarama.NewConfig()
-
-	// configure network level properties
-	timeout := config.Timeout
-	k.Net.DialTimeout = timeout
-	k.Net.ReadTimeout = timeout
-	k.Net.WriteTimeout = timeout
-	k.Net.KeepAlive = config.KeepAlive
-	k.Producer.Timeout = config.BrokerTimeout
-
-	tls, err := outputs.LoadTLSConfig(config.TLS)
-	if err != nil {
-		return nil, err
-	}
-	if tls != nil {
-		k.Net.TLS.Enable = true
-		k.Net.TLS.Config = tls.BuildModuleConfig("")
-	}
-
-	if config.Username != "" {
-		k.Net.SASL.Enable = true
-		k.Net.SASL.User = config.Username
-		k.Net.SASL.Password = config.Password
-	}
-
-	// configure metadata update properties
-	k.Metadata.Retry.Max = config.Metadata.Retry.Max
-	k.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff
-	k.Metadata.RefreshFrequency = config.Metadata.RefreshFreq
-
-	// configure producer API properties
-	if config.MaxMessageBytes != nil {
-		k.Producer.MaxMessageBytes = *config.MaxMessageBytes
-	}
-	if config.RequiredACKs != nil {
-		k.Producer.RequiredAcks = sarama.RequiredAcks(*config.RequiredACKs)
-	}
-
-	compressionMode, ok := compressionModes[strings.ToLower(config.Compression)]
-	if !ok {
-		return nil, fmt.Errorf("Unknown compression mode: '%v'", config.Compression)
-	}
-	k.Producer.Compression = compressionMode
-
-	k.Producer.Return.Successes = true // enable return channel for signaling
-	k.Producer.Return.Errors = true
-
-	// have retries being handled by libbeat, disable retries in sarama library
-	retryMax := config.MaxRetries
-	if retryMax < 0 {
-		retryMax = 1000
-	}
-	k.Producer.Retry.Max = retryMax
-	// TODO: k.Producer.Retry.Backoff = ?
-
-	// configure per broker go channel buffering
-	k.ChannelBufferSize = config.ChanBufferSize
-
-	// configure client ID
-	k.ClientID = config.ClientID
-	if err := k.Validate(); err != nil {
-		logp.Err("Invalid kafka configuration: %v", err)
-		return nil, err
-	}
-
-	version, ok := kafkaVersions[config.Version]
-	if !ok {
-		return nil, fmt.Errorf("Unknown/unsupported kafka version: %v", config.Version)
-	}
-	k.Version = version
-
-	k.MetricRegistry = kafkaMetricsRegistry()
-
-	k.Producer.Partitioner = partitioner
-	k.MetricRegistry = adapter.GetGoMetrics(
-		monitoring.Default,
-		"libbeat.outputs.kafka",
-		adapter.Rename("incoming-byte-rate", "bytes_read"),
-		adapter.Rename("outgoing-byte-rate", "bytes_write"),
-		adapter.GoMetricsNilify,
-	)
-
-	return k, nil
-}
diff --git a/libbeat/outputs/kafka/version.go b/libbeat/outputs/kafka/version.go
new file mode 100644
index 00000000000..a2bd8355757
--- /dev/null
+++ b/libbeat/outputs/kafka/version.go
@@ -0,0 +1,60 @@
+package kafka
+
+import "github.com/Shopify/sarama"
+
+// TODO: remove me.
+// Compat version overwrite for missing versions in sarama
+// Public API is compatible between these versions.
+var (
+	v0_10_2_1 = parseKafkaVersion("0.10.2.1")
+	v0_11_0_1 = parseKafkaVersion("0.11.0.1")
+	v0_11_0_2 = parseKafkaVersion("0.11.0.2")
+	v1_0_1    = parseKafkaVersion("1.0.1")
+	v1_1_0    = parseKafkaVersion("1.1.0")
+
+	kafkaVersions = map[string]sarama.KafkaVersion{
+		"": sarama.V1_0_0_0,
+
+		"0.8.2.0": sarama.V0_8_2_0,
+		"0.8.2.1": sarama.V0_8_2_1,
+		"0.8.2.2": sarama.V0_8_2_2,
+		"0.8.2":   sarama.V0_8_2_2,
+		"0.8":     sarama.V0_8_2_2,
+
+		"0.9.0.0": sarama.V0_9_0_0,
+		"0.9.0.1": sarama.V0_9_0_1,
+		"0.9.0":   sarama.V0_9_0_1,
+		"0.9":     sarama.V0_9_0_1,
+
+		"0.10.0.0": sarama.V0_10_0_0,
+		"0.10.0.1": sarama.V0_10_0_1,
+		"0.10.0":   sarama.V0_10_0_1,
+		"0.10.1.0": sarama.V0_10_1_0,
+		"0.10.1":   sarama.V0_10_1_0,
+		"0.10.2.0": sarama.V0_10_2_0,
+		"0.10.2.1": v0_10_2_1,
+		"0.10.2":   v0_10_2_1,
+		"0.10":     v0_10_2_1,
+
+		"0.11.0.0": sarama.V0_11_0_0,
+		"0.11.0.1": v0_11_0_1,
+		"0.11.0.2": v0_11_0_2,
+		"0.11.0":   v0_11_0_2,
+		"0.11":     v0_11_0_2,
+
+		"1.0.0": sarama.V1_0_0_0,
+		"1.0.1": v1_0_1,
+		"1.0":   v1_0_1,
+		"1.1.0": v1_1_0,
+		"1.1":   v1_1_0,
+		"1":     v1_1_0,
+	}
+)
+
+func parseKafkaVersion(s string) sarama.KafkaVersion {
+	v, err := sarama.ParseKafkaVersion(s)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
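version.go backfills point releases that have no constant in sarama by parsing them with `sarama.ParseKafkaVersion`, which the vendored changelog below notes was added upstream in v1.15 (#989). A small usage sketch, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Parse a point release that sarama has no named constant for.
	v, err := sarama.ParseKafkaVersion("0.11.0.2")
	if err != nil {
		panic(err)
	}
	// Feature gating works on the parsed value just like on the constants.
	fmt.Println(v.IsAtLeast(sarama.V0_11_0_0)) // true
}
```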
diff --git a/testing/environments/docker/kafka/Dockerfile b/testing/environments/docker/kafka/Dockerfile
index 99d01f83d08..3bf9c1e8b59 100644
--- a/testing/environments/docker/kafka/Dockerfile
+++ b/testing/environments/docker/kafka/Dockerfile
@@ -4,7 +4,7 @@ ENV KAFKA_HOME /kafka
 # The advertised host is kafka. This means it will not work if container is started locally and connected from localhost to it
 ENV KAFKA_ADVERTISED_HOST kafka
 ENV KAFKA_LOGS_DIR="/kafka-logs"
-ENV KAFKA_VERSION 0.10.2.1
+ENV KAFKA_VERSION 1.0.0
 ENV _JAVA_OPTIONS "-Djava.net.preferIPv4Stack=true"
 ENV TERM=linux
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
index 0a0082df752..836841650c3 100644
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -1,5 +1,119 @@
 # Changelog
 
+#### Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/Shopify/sarama/pull/1007),
+   [#1008](https://github.com/Shopify/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/Shopify/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/Shopify/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/Shopify/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/Shopify/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/Shopify/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/Shopify/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+   recompression pass
+   ([#1002](https://github.com/Shopify/sarama/pull/1002),
+   [#1015](https://github.com/Shopify/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+   ([#1032](https://github.com/Shopify/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+   ([#1005](https://github.com/Shopify/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+   ([#1021](https://github.com/Shopify/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+   ([#1022](https://github.com/Shopify/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+   truncated before the magic value indicating its version
+   ([#1030](https://github.com/Shopify/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+   ([#1035](https://github.com/Shopify/sarama/pull/1035)).
+
+#### Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+   ([#984](https://github.com/Shopify/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+   ([#989](https://github.com/Shopify/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+   ([#985](https://github.com/Shopify/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+   ([#986](https://github.com/Shopify/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+   ([#982](https://github.com/Shopify/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+   ([#990](https://github.com/Shopify/sarama/pull/990)).
+ - Fix producing with multiple headers
+   ([#996](https://github.com/Shopify/sarama/pull/996)).
+ - Fix handling of truncated record batches
+   ([#998](https://github.com/Shopify/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+   ([#991](https://github.com/Shopify/sarama/pull/991)).
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+   protocol and the necessary behavioural changes in the producer and consumer.
+   Transactions and idempotency are not yet supported, but producing and
+   consuming should work with all the existing bells and whistles (batching,
+   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+   of Arista Networks for this work. Part of
+   ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+   ([#970](https://github.com/Shopify/sarama/pull/970)).
+ - Return partial replicas list when we have it
+   ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+   ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+   ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+   extremely large clusters
+   ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+   ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+   ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+   by the broker, which can be meaningful
+   ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+   variance in the actual timeout
+   ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+   ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+   ([#940](https://github.com/Shopify/sarama/pull/940)).
+
 #### Version 1.12.0 (2017-05-08)
 
 New Features:
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
index 626b09a5451..58a39e4f34d 100644
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -1,7 +1,15 @@
 default: fmt vet errcheck test
 
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
 test:
-	go test -v -timeout 60s -race ./...
+	echo "" > coverage.txt
+	for d in `go list ./... | grep -v vendor`; do \
+	    go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \
+	    if [ -f profile.out ]; then \
+	        cat profile.out >> coverage.txt; \
+	        rm profile.out; \
+	    fi \
+	done
 
 vet:
 	go vet ./...
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
index 6e12a07ae08..28431f13eb8 100644
--- a/vendor/github.com/Shopify/sarama/README.md
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -3,6 +3,7 @@ sarama
 
 [![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
 [![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
 
 Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
 
@@ -20,7 +21,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are
+Go 1.9 and 1.8, and Kafka 1.0 through 0.10, although older releases are
 still likely to work.
 
 Sarama follows semantic versioning and provides API stability via the gopkg.in service.
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
new file mode 100644
index 00000000000..51517359abc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -0,0 +1,119 @@
+package sarama
+
+type Resource struct {
+	ResourceType AclResourceType
+	ResourceName string
+}
+
+func (r *Resource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(r.ResourceType))
+
+	if err := pe.putString(r.ResourceName); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.ResourceType = AclResourceType(resourceType)
+
+	if r.ResourceName, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type Acl struct {
+	Principal      string
+	Host           string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *Acl) encode(pe packetEncoder) error {
+	if err := pe.putString(a.Principal); err != nil {
+		return err
+	}
+
+	if err := pe.putString(a.Host); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
+	if a.Principal, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
+
+type ResourceAcls struct {
+	Resource
+	Acls []*Acl
+}
+
+func (r *ResourceAcls) encode(pe packetEncoder) error {
+	if err := r.Resource.encode(pe); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Acls)); err != nil {
+		return err
+	}
+	for _, acl := range r.Acls {
+		if err := acl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
+	if err := r.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Acls = make([]*Acl, n)
+	for i := 0; i < n; i++ {
+		r.Acls[i] = new(Acl)
+		if err := r.Acls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
new file mode 100644
index 00000000000..0b6ecbec3e1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -0,0 +1,76 @@
+package sarama
+
+type CreateAclsRequest struct {
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *CreateAclsRequest) key() int16 {
+	return 30
+}
+
+func (d *CreateAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder) error {
+	if err := a.Resource.encode(pe); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
new file mode 100644
index 00000000000..8a56f357354
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -0,0 +1,88 @@
+package sarama
+
+import "time"
+
+type CreateAclsResponse struct {
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *CreateAclsResponse) key() int16 {
+	return 30
+}
+
+func (d *CreateAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type AclCreationResponse struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(a.Err))
+
+	if err := pe.putNullableString(a.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	if a.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
new file mode 100644
index 00000000000..4133dceab71
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+type DeleteAclsRequest struct {
+	Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Filters)); err != nil {
+		return err
+	}
+
+	for _, filter := range d.Filters {
+		if err := filter.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.Filters = make([]*AclFilter, n)
+	for i := 0; i < n; i++ {
+		d.Filters[i] = new(AclFilter)
+		if err := d.Filters[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
new file mode 100644
index 00000000000..b5e1c45eb5d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -0,0 +1,155 @@
+package sarama
+
+import "time"
+
+type DeleteAclsResponse struct {
+	ThrottleTime    time.Duration
+	FilterResponses []*FilterResponse
+}
+
+func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
+		return err
+	}
+
+	for _, filterResponse := range a.FilterResponses {
+		if err := filterResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	a.FilterResponses = make([]*FilterResponse, n)
+
+	for i := 0; i < n; i++ {
+		a.FilterResponses[i] = new(FilterResponse)
+		if err := a.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(f.Err))
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(kerr)
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder) error {
+	pe.putInt16(int16(m.Err))
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	m.Err = KError(kerr)
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
new file mode 100644
index 00000000000..02a5a1f0e22
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -0,0 +1,25 @@
+package sarama
+
+type DescribeAclsRequest struct {
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
new file mode 100644
index 00000000000..5bc9497f4c5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -0,0 +1,80 @@
+package sarama
+
+import "time"
+
+type DescribeAclsResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(d.Err))
+
+	if err := pe.putNullableString(d.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+		return err
+	}
+
+	for _, resourceAcl := range d.ResourceAcls {
+		if err := resourceAcl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	d.Err = KError(kerr)
+
+	errmsg, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	if errmsg != "" {
+		d.ErrMsg = &errmsg
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.ResourceAcls = make([]*ResourceAcls, n)
+
+	for i := 0; i < n; i++ {
+		d.ResourceAcls[i] = new(ResourceAcls)
+		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
new file mode 100644
index 00000000000..97063542198
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -0,0 +1,61 @@
+package sarama
+
+type AclFilter struct {
+	ResourceType   AclResourceType
+	ResourceName   *string
+	Principal      *string
+	Host           *string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *AclFilter) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.ResourceType))
+	if err := pe.putNullableString(a.ResourceName); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Principal); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Host); err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.ResourceType = AclResourceType(resourceType)
+
+	if a.ResourceName, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Principal, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
new file mode 100644
index 00000000000..19da6f2f451
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -0,0 +1,42 @@
+package sarama
+
+type AclOperation int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown         AclOperation = 0
+	AclOperationAny             AclOperation = 1
+	AclOperationAll             AclOperation = 2
+	AclOperationRead            AclOperation = 3
+	AclOperationWrite           AclOperation = 4
+	AclOperationCreate          AclOperation = 5
+	AclOperationDelete          AclOperation = 6
+	AclOperationAlter           AclOperation = 7
+	AclOperationDescribe        AclOperation = 8
+	AclOperationClusterAction   AclOperation = 9
+	AclOperationDescribeConfigs AclOperation = 10
+	AclOperationAlterConfigs    AclOperation = 11
+	AclOperationIdempotentWrite AclOperation = 12
+)
+
+type AclPermissionType int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = 0
+	AclPermissionAny     AclPermissionType = 1
+	AclPermissionDeny    AclPermissionType = 2
+	AclPermissionAllow   AclPermissionType = 3
+)
+
+type AclResourceType int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown         AclResourceType = 0
+	AclResourceAny             AclResourceType = 1
+	AclResourceTopic           AclResourceType = 2
+	AclResourceGroup           AclResourceType = 3
+	AclResourceCluster         AclResourceType = 4
+	AclResourceTransactionalID AclResourceType = 5
+)
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 00000000000..6da166c634b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type AddOffsetsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 00000000000..3a46151a050
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AddOffsetsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(a.Err))
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 00000000000..a8a59225e4d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,76 @@
+package sarama
+
+type AddPartitionsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 00000000000..581c556c5ce
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,108 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AddPartitionsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt16(int16(p.Err))
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	p.Err = KError(kerr)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
new file mode 100644
index 00000000000..48c44ead67a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -0,0 +1,120 @@
+package sarama
+
+type AlterConfigsRequest struct {
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(acr.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range acr.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(acr.ValidateOnly)
+	return nil
+}
+
+func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range acr.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		acr.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	acr.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(ac.Type))
+
+	if err := pe.putString(ac.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range ac.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	ac.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	ac.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		ac.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (acr *AlterConfigsRequest) key() int16 {
+	return 33
+}
+
+func (acr *AlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
new file mode 100644
index 00000000000..29b09e1ff84
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -0,0 +1,95 @@
+package sarama
+
+import "time"
+
+type AlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(ct.Resources)); err != nil {
+		return err
+	}
+
+	for i := range ct.Resources {
+		pe.putInt16(ct.Resources[i].ErrorCode)
+		err := pe.putString(ct.Resources[i].ErrorMsg)
+		if err != nil {
+			return nil
+		}
+		pe.putInt8(int8(ct.Resources[i].Type))
+		err = pe.putString(ct.Resources[i].Name)
+		if err != nil {
+			return nil
+		}
+	}
+
+	return nil
+}
+
+func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range acr.Resources {
+		acr.Resources[i] = new(AlterConfigsResourceResponse)
+
+		errCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorCode = errCode
+
+		e, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorMsg = e
+
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Type = ConfigResourceType(t)
+
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Name = name
+	}
+
+	return nil
+}
+
+func (r *AlterConfigsResponse) key() int16 {
+	return 32
+}
+
+func (r *AlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
index 6d71a6d8feb..1eff81cbf62 100644
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -1,6 +1,7 @@
 package sarama
 
 import (
+	"encoding/binary"
 	"fmt"
 	"sync"
 	"time"
@@ -119,6 +120,10 @@ type ProducerMessage struct {
 	// StringEncoder and ByteEncoder.
 	Value Encoder
 
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
 	// This field is used to hold arbitrary data you wish to include so it
 	// will be available when receiving on the Successes and Errors channels.
 	// Sarama completely ignores this field and is only to be used for
@@ -146,8 +151,16 @@ type ProducerMessage struct {
 
 const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
 
-func (m *ProducerMessage) byteSize() int {
-	size := producerMessageOverhead
+func (m *ProducerMessage) byteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
 	if m.Key != nil {
 		size += m.Key.Length()
 	}
@@ -254,7 +267,11 @@ func (p *asyncProducer) dispatcher() {
 			p.inFlight.Add(1)
 		}
 
-		if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		}
+		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
 			p.returnError(msg, ErrMessageSizeTooLarge)
 			continue
 		}
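The async_producer change above sizes messages by the v2 record-batch overhead (plus per-header bytes) once the configured version reaches 0.11, which is also what makes the new `Headers` field usable. A sketch of producing with record headers — not from the patch; the broker address and topic are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer
	// Record headers ride on the v2 record-batch format, so the client
	// must assume at least the 0.11 protocol.
	cfg.Version = sarama.V0_11_0_0

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: "events", // placeholder topic
		Value: sarama.StringEncoder("hello"),
		Headers: []sarama.RecordHeader{
			{Key: []byte("trace-id"), Value: []byte("abc123")},
		},
	}
	if _, _, err := producer.SendMessage(msg); err != nil {
		log.Fatal(err)
	}
}
```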
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
index f57a6909429..b759f8f7841 100644
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -178,6 +178,13 @@ func (b *Broker) Close() error {
 	b.done = nil
 	b.responses = nil
 
+	if b.id >= 0 {
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
+	}
+
 	if err == nil {
 		Logger.Printf("Closed connection to broker %s\n", b.addr)
 	} else {
@@ -366,6 +373,137 @@ func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse,
 	return response, nil
 }
 
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
 func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
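Each of the new Broker methods is a thin sendAndReceive wrapper around one request/response pair. A hedged sketch of listing topic ACLs straight off a broker, using only the types defined in the ACL files above (address is a placeholder; the version gate reflects that these APIs carry requiredVersion V0_11_0_0):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // ACL request/response pairs exist from 0.11 on

	b := sarama.NewBroker("localhost:9092") // placeholder address
	if err := b.Open(cfg); err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	// Match every topic ACL: nil name/principal/host encode as null filters.
	resp, err := b.DescribeAcls(&sarama.DescribeAclsRequest{
		AclFilter: sarama.AclFilter{
			ResourceType:   sarama.AclResourceTopic,
			Operation:      sarama.AclOperationAny,
			PermissionType: sarama.AclPermissionAny,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ra := range resp.ResourceAcls {
		fmt.Println(ra.ResourceName, len(ra.Acls))
	}
}
```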
It will @@ -141,18 +141,20 @@ func NewClient(addrs []string, conf *Config) (Client, error) { client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) } - // do an initial fetch of all cluster metadata by specifying an empty list of topics - err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: - // indicates that maybe part of the cluster is down, but is not fatal to creating the client - Logger.Println(err) - default: - close(client.closed) // we haven't started the background updater yet, so we have to do this manually - _ = client.Close() - return nil, err + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } } go withRecover(client.backgroundMetadataUpdater) @@ -295,9 +297,9 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) } if metadata.Err == ErrReplicaNotAvailable { - return nil, metadata.Err + return dupInt32Slice(metadata.Replicas), metadata.Err } - return dupeAndSort(metadata.Replicas), nil + return dupInt32Slice(metadata.Replicas), nil } func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { @@ -320,9 +322,9 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, } if metadata.Err == ErrReplicaNotAvailable { - return nil, metadata.Err + return dupInt32Slice(metadata.Isr), metadata.Err } - return dupeAndSort(metadata.Isr), nil + return dupInt32Slice(metadata.Isr), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { @@ -605,7 +607,20 @@ func (client *client) backgroundMetadataUpdater() { for { select { case <-ticker.C: - if err := client.RefreshMetadata(); err != nil { + topics := []string{} + if !client.conf.Metadata.Full { + if specificTopics, err := client.Topics(); err != nil { + Logger.Println("Client background metadata topic load:", err) + break + } else if len(specificTopics) == 0 { + Logger.Println("Client background metadata update: no specific topics to update") + break + } else { + topics = specificTopics + } + } + + if err := client.RefreshMetadata(topics...); err != nil { Logger.Println("Client background metadata update:", err) } case <-client.closer: diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index 5021c57e908..29ea5c2b36e 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -72,6 +72,12 @@ type Config struct { // Defaults to 10 minutes. Set to 0 to disable. Similar to // `topic.metadata.refresh.interval.ms` in the JVM version. RefreshFrequency time.Duration + + // Whether to maintain a full set of metadata for all topics, or just + // the minimal set that has been necessary so far. The full set is simpler + // and usually more convenient, but can take up a substantial amount of + // memory if you have many topics and partitions. Defaults to true. 
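+	// Editor's note: a minimal sketch of how a client might opt out of full
+	// metadata on a large cluster (hypothetical usage, not part of this patch):
+	//
+	//	cfg := sarama.NewConfig()
+	//	cfg.Metadata.Full = false // refresh metadata only for topics in use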
+ Full bool } // Producer is the namespace for configuration related to producing messages, @@ -99,7 +105,10 @@ type Config struct { Partitioner PartitionerConstructor // Return specifies what channels will be populated. If they are set to true, - // you must read from the respective channels to prevent deadlock. + // you must read from the respective channels to prevent deadlock. If, + // however, this config is used to create a `SyncProducer`, both must be set + // to true and you shall not read from the channels since the producer does + // this internally. Return struct { // If enabled, successfully delivered messages will be returned on the // Successes channel (default disabled). @@ -166,7 +175,7 @@ type Config struct { // Equivalent to the JVM's `fetch.min.bytes`. Min int32 // The default number of message bytes to fetch from the broker in each - // request (default 32768). This should be larger than the majority of + // request (default 1MB). This should be larger than the majority of // your messages, or else the consumer will spend a lot of time // negotiating sizes and not actually consuming. Similar to the JVM's // `fetch.message.max.bytes`. @@ -187,11 +196,23 @@ type Config struct { // Equivalent to the JVM's `fetch.wait.max.ms`. MaxWaitTime time.Duration - // The maximum amount of time the consumer expects a message takes to process - // for the user. If writing to the Messages channel takes longer than this, - // that partition will stop fetching more messages until it can proceed again. + // The maximum amount of time the consumer expects a message takes to + // process for the user. If writing to the Messages channel takes longer + // than this, that partition will stop fetching more messages until it + // can proceed again. // Note that, since the Messages channel is buffered, the actual grace time is // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. + // If a message is not written to the Messages channel between two ticks + // of the expiryTicker then a timeout is detected. + // Using a ticker instead of a timer to detect timeouts should typically + // result in many fewer calls to Timer functions which may result in a + // significant performance improvement if many messages are being sent + // and timeouts are infrequent. + // The disadvantage of using a ticker instead of a timer is that + // timeouts will be less accurate. That is, the effective timeout could + // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For + // example, if `MaxProcessingTime` is 100ms then a delay of 180ms + // between two messages being sent may not be recognized as a timeout. MaxProcessingTime time.Duration // Return specifies what channels will be populated.
If they are set to true, @@ -260,6 +281,7 @@ func NewConfig() *Config { c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.RefreshFrequency = 10 * time.Minute + c.Metadata.Full = true c.Producer.MaxMessageBytes = 1000000 c.Producer.RequiredAcks = WaitForLocal @@ -270,7 +292,7 @@ func NewConfig() *Config { c.Producer.Return.Errors = true c.Consumer.Fetch.Min = 1 - c.Consumer.Fetch.Default = 32768 + c.Consumer.Fetch.Default = 1024 * 1024 c.Consumer.Retry.Backoff = 2 * time.Second c.Consumer.MaxWaitTime = 250 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go new file mode 100644 index 00000000000..848cc9c90c5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -0,0 +1,15 @@ +package sarama + +type ConfigResourceType int8 + +// Taken from : +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes + +const ( + UnknownResource ConfigResourceType = 0 + AnyResource ConfigResourceType = 1 + TopicResource ConfigResourceType = 2 + GroupResource ConfigResourceType = 3 + ClusterResource ConfigResourceType = 4 + BrokerResource ConfigResourceType = 5 +) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index 78d7fa2caa6..48d231cf984 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -10,11 +10,13 @@ import ( // ConsumerMessage encapsulates a Kafka message returned by the consumer. type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 - Timestamp time.Time // only set if kafka is version 0.10+ + Key, Value []byte + Topic string + Partition int32 + Offset int64 + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp + Headers []*RecordHeader // only set if kafka is version 0.11+ } // ConsumerError is what is provided to the user when an error occurs. @@ -246,9 +248,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // PartitionConsumer -// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() -// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically -// when it passes out of scope. +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or +// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out +// of scope. // // The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range // loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported @@ -257,19 +259,25 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set // your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. 
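+// A minimal sketch of that loop (editor's example; assumes `pc` came from
+// Consumer.ConsumePartition and `handle` is the caller's own function):
+//
+//	for msg := range pc.Messages() {
+//		handle(msg) // msg.Offset, msg.Key, msg.Value, and (0.11+) msg.Headers
+//	}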
+// + // To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process +// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call +// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will +// also drain the Messages channel, harvest all errors & return them once cleanup has completed. type PartitionConsumer interface { - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will - // return immediately, after which you should wait until the 'messages' and - // 'errors' channel are drained. It is required to call this function, or - // Close before a consumer object passes out of scope, as it will otherwise - // leak memory. You must call this before calling Close on the underlying client. + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you + // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this + // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call + // this before calling Close on the underlying client. AsyncClose() - // Close stops the PartitionConsumer from fetching messages. It is required to - // call this function (or AsyncClose) before a consumer object passes out of - // scope, as it will otherwise leak memory. You must call this before calling - // Close on the underlying client. + // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain + // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service + // the Messages channel when this function is called, you will be competing with Close for messages; consider + // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes + // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
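+	// For reference, the AsyncClose-based teardown described above might look
+	// like this (editor's sketch; `pc` is a PartitionConsumer and `handle` is
+	// the caller's own function):
+	//
+	//	pc.AsyncClose()
+	//	for msg := range pc.Messages() { handle(msg) } // ends when teardown closes the channel
+	//	for err := range pc.Errors() { log.Println(err) }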
Close() error // Messages returns the read channel for the messages that are returned by @@ -433,42 +441,126 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 { func (child *partitionConsumer) responseFeeder() { var msgs []*ConsumerMessage - expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime) - expireTimedOut := false + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) + firstAttempt := true feederLoop: for response := range child.feeder { msgs, child.responseResult = child.parseResponse(response) for i, msg := range msgs { - if !expiryTimer.Stop() && !expireTimedOut { - // expiryTimer was expired; clear out the waiting msg - <-expiryTimer.C - } - expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime) - expireTimedOut = false - + messageSelect: select { case child.messages <- msg: - case <-expiryTimer.C: - expireTimedOut = true - child.responseResult = errTimedOut - child.broker.acks.Done() - for _, msg = range msgs[i:] { - child.messages <- msg + firstAttempt = true + case <-expiryTicker.C: + if !firstAttempt { + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + expiryTicker.Stop() + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + firstAttempt = false + goto messageSelect } - child.broker.input <- child - continue feederLoop } } child.broker.acks.Done() } + expiryTicker.Stop() close(child.messages) close(child.errors) } +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + var incomplete bool + prelude := true + + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + } + if prelude && offset < child.offset { + continue + } + prelude = false + + if offset >= child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: msg.Msg.Timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } else { + incomplete = true + } + } + } + + if incomplete || len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + var incomplete bool + prelude := true + originalOffset := child.offset + + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if prelude && offset < child.offset { + continue + } + prelude = false + + if offset >= child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: offset, + Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), + Headers: rec.Headers, + }) + child.offset = offset + 1 + } else { + incomplete = true + } + } + + if incomplete { + return nil, ErrIncompleteResponse + } + + child.offset = batch.FirstOffset + int64(batch.LastOffsetDelta) + 1 + if child.offset <= originalOffset { + return nil, ErrConsumerOffsetNotAdvanced + } + + return messages, nil +} + func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, 
error) { block := response.GetBlock(child.topic, child.partition) if block == nil { @@ -479,10 +571,18 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, block.Err } - if len(block.MsgSet.Messages) == 0 { + nRecs, err := block.numRecords() + if err != nil { + return nil, err + } + if nRecs == 0 { + partialTrailingMessage, err := block.isPartial() + if err != nil { + return nil, err + } // We got no messages. If we got a trailing one then we need to ask for more data. // Otherwise we just poll again and wait for one to be produced... - if block.MsgSet.PartialTrailingMessage { + if partialTrailingMessage { if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { // we can't ask for more data, we've hit the configured limit child.sendError(ErrMessageTooLarge) @@ -502,42 +602,32 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = child.conf.Consumer.Fetch.Default atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - incomplete := false - prelude := true - var messages []*ConsumerMessage - for _, msgBlock := range block.MsgSet.Messages { + messages := []*ConsumerMessage{} + for _, records := range block.RecordsSet { + if control, err := records.isControl(); err != nil || control { + continue + } - for _, msg := range msgBlock.Messages() { - offset := msg.Offset - if msg.Msg.Version >= 1 { - baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset - offset += baseOffset - } - if prelude && offset < child.offset { - continue + switch records.recordsType { + case legacyRecords: + messageSetMessages, err := child.parseMessages(records.msgSet) + if err != nil { + return nil, err } - prelude = false - if offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: msg.Msg.Key, - Value: msg.Msg.Value, - Offset: offset, - Timestamp: msg.Msg.Timestamp, - }) - child.offset = offset + 1 - } else { - incomplete = true + messages = append(messages, messageSetMessages...) + case defaultRecords: + recordBatchMessages, err := child.parseRecords(records.recordBatch) + if err != nil { + return nil, err } - } + messages = append(messages, recordBatchMessages...) + default: + return nil, fmt.Errorf("unknown records type: %v", records.recordsType) + } } - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse - } return messages, nil } @@ -726,6 +816,14 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + request.Isolation = ReadUncommitted // We don't support transactions yet.
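+		// Editor's note: the v3/v4 request paths above are gated on the client's
+		// configured broker version, e.g. (hypothetical usage):
+		//
+		//	cfg := sarama.NewConfig()
+		//	cfg.Version = sarama.V0_11_0_0 // enables FetchRequest v4 and record batches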
+ } for child := range bc.subscriptions { request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go index f4fde18ad25..1f144431a8b 100644 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -2,12 +2,23 @@ package sarama import ( "encoding/binary" + "fmt" "hash/crc32" ) +type crcPolynomial int8 + +const ( + crcIEEE crcPolynomial = iota + crcCastagnoli +) + +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. type crc32Field struct { startOffset int + polynomial crcPolynomial } func (c *crc32Field) saveOffset(in int) { @@ -18,18 +29,41 @@ func (c *crc32Field) reserveLength() int { return 4 } +func newCRC32Field(polynomial crcPolynomial) *crc32Field { + return &crc32Field{polynomial: polynomial} +} + func (c *crc32Field) run(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } binary.BigEndian.PutUint32(buf[c.startOffset:], crc) return nil } func (c *crc32Field) check(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } - if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { - return PacketDecodingError{"CRC didn't match"} + expected := binary.BigEndian.Uint32(buf[c.startOffset:]) + if crc != expected { + return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} } return nil } +func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { + var tab *crc32.Table + switch c.polynomial { + case crcIEEE: + tab = crc32.IEEETable + case crcCastagnoli: + tab = castagnoliTable + default: + return 0, PacketDecodingError{"invalid CRC type"} + } + return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go new file mode 100644 index 00000000000..af321e99466 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -0,0 +1,121 @@ +package sarama + +import "time" + +type CreatePartitionsRequest struct { + TopicPartitions map[string]*TopicPartition + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { + return err + } + + for topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * 
time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { + return err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 00000000000..abd621c64ec --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go 
b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 00000000000..709c0a44e71 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,174 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + 
return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 00000000000..66207e00c5d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,112 @@ +package sarama + +import "time" + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + +func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 00000000000..ed9089ea478 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,41 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) version() int16 { + return 0 +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + return V0_10_1_0 +} diff 
--git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 00000000000..34225460a31 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,78 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go new file mode 100644 index 00000000000..7a7cffc3fb2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -0,0 +1,91 @@ +package sarama + +type ConfigResource struct { + Type ConfigResourceType + Name string + ConfigNames []string +} + +type DescribeConfigsRequest struct { + Resources []*ConfigResource +} + +func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + pe.putInt8(int8(c.Type)) + if err := pe.putString(c.Name); err != nil { + return err + } + + if len(c.ConfigNames) == 0 { + pe.putInt32(-1) + continue + } + if err := pe.putStringArray(c.ConfigNames); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ConfigResource, n) + + for i := 0; i < n; i++ { + r.Resources[i] = &ConfigResource{} + t, err := pd.getInt8() + if err != nil { + return err + } + r.Resources[i].Type = ConfigResourceType(t) + name, err := pd.getString() + if err != nil { + return err + } + r.Resources[i].Name = name + + confLength, err := pd.getArrayLength() + + if err != nil { + return err + } + + if confLength == -1 { + continue + } + + cfnames := make([]string, confLength) + for i := 0; i < confLength; i++ { + s, err := pd.getString() + if err != nil { + return err + } + cfnames[i] = s + } + r.Resources[i].ConfigNames = cfnames + } + + return nil +} + +func (r *DescribeConfigsRequest) key() int16 { + return 32 +} + +func 
(r *DescribeConfigsRequest) version() int16 { + return 0 +} + +func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go new file mode 100644 index 00000000000..6e5d30e4f09 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -0,0 +1,188 @@ +package sarama + +import "time" + +type DescribeConfigsResponse struct { + ThrottleTime time.Duration + Resources []*ResourceResponse +} + +type ResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string + Configs []*ConfigEntry +} + +type ConfigEntry struct { + Name string + Value string + ReadOnly bool + Default bool + Sensitive bool +} + +func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + if err = pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + if err = c.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ResourceResponse, n) + for i := 0; i < n; i++ { + rr := &ResourceResponse{} + if err := rr.decode(pd, version); err != nil { + return err + } + r.Resources[i] = rr + } + + return nil +} + +func (r *DescribeConfigsResponse) key() int16 { + return 32 +} + +func (r *DescribeConfigsResponse) version() int16 { + return 0 +} + +func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +func (r *ResourceResponse) encode(pe packetEncoder) (err error) { + pe.putInt16(r.ErrorCode) + + if err = pe.putString(r.ErrorMsg); err != nil { + return err + } + + pe.putInt8(int8(r.Type)) + + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putArrayLength(len(r.Configs)); err != nil { + return err + } + + for _, c := range r.Configs { + if err = c.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { + ec, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = ec + + em, err := pd.getString() + if err != nil { + return err + } + r.ErrorMsg = em + + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Configs = make([]*ConfigEntry, n) + for i := 0; i < n; i++ { + c := &ConfigEntry{} + if err := c.decode(pd, version); err != nil { + return err + } + r.Configs[i] = c + } + return nil +} + +func (r *ConfigEntry) encode(pe packetEncoder) (err error) { + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putString(r.Value); err != nil { + return err + } + + pe.putBool(r.ReadOnly) + pe.putBool(r.Default) + pe.putBool(r.Sensitive) + return nil +} + +func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + value, err := pd.getString() + if err != nil { + return err + } + r.Value = value + + 
read, err := pd.getBool() + if err != nil { + return err + } + r.ReadOnly = read + + de, err := pd.getBool() + if err != nil { + return err + } + r.Default = de + + sensitive, err := pd.getBool() + if err != nil { + return err + } + r.Sensitive = sensitive + return nil +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml new file mode 100644 index 00000000000..294fcdb413b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -0,0 +1,10 @@ +name: sarama + +up: + - go: + version: '1.9' + +commands: + test: + run: make test + desc: 'run unit tests' diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go new file mode 100644 index 00000000000..2cd9b506d3f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -0,0 +1,50 @@ +package sarama + +type EndTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TransactionResult bool +} + +func (a *EndTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + pe.putBool(a.TransactionResult) + + return nil +} + +func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.TransactionResult, err = pd.getBool(); err != nil { + return err + } + return nil +} + +func (a *EndTxnRequest) key() int16 { + return 26 +} + +func (a *EndTxnRequest) version() int16 { + return 0 +} + +func (a *EndTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go new file mode 100644 index 00000000000..33b27e33d49 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -0,0 +1,44 @@ +package sarama + +import ( + "time" +) + +type EndTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (e *EndTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(e.Err)) + return nil +} + +func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + e.Err = KError(kerr) + + return nil +} + +func (e *EndTxnResponse) key() int16 { + return 25 +} + +func (e *EndTxnResponse) version() int16 { + return 0 +} + +func (e *EndTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index e6800ed4929..54f431a4a91 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -37,6 +37,10 @@ var ErrShuttingDown = errors.New("kafka: message received by producer in process // ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") +// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its 
offset after parsing +// a RecordBatch. +var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch") + // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. type PacketEncodingError struct { @@ -71,52 +75,68 @@ type KError int16 // Numeric error codes returned by the Kafka server. const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrNetworkException KError = 13 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 - ErrInvalidTimestamp KError = 32 - ErrUnsupportedSASLMechanism KError = 33 - ErrIllegalSASLState KError = 34 - ErrUnsupportedVersion KError = 35 - ErrTopicAlreadyExists KError = 36 - ErrInvalidPartitions KError = 37 - ErrInvalidReplicationFactor KError = 38 - ErrInvalidReplicaAssignment KError = 39 - ErrInvalidConfig KError = 40 - ErrNotController KError = 41 - ErrInvalidRequest KError = 42 - ErrUnsupportedForMessageFormat KError = 43 - ErrPolicyViolation KError = 44 + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + 
ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 + ErrOutOfOrderSequenceNumber KError = 45 + ErrDuplicateSequenceNumber KError = 46 + ErrInvalidProducerEpoch KError = 47 + ErrInvalidTxnState KError = 48 + ErrInvalidProducerIDMapping KError = 49 + ErrInvalidTransactionTimeout KError = 50 + ErrConcurrentTransactions KError = 51 + ErrTransactionCoordinatorFenced KError = 52 + ErrTransactionalIDAuthorizationFailed KError = 53 + ErrSecurityDisabled KError = 54 + ErrOperationNotAttempted KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 ) func (err KError) Error() string { @@ -215,6 +235,38 @@ func (err KError) Error() string { return "kafka server: The requested operation is not supported by the message format version." case ErrPolicyViolation: return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number." + case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." + case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." + case ErrOperationNotAttempted: + return "kafka server: The broker did not attempt to execute this operation." + case ErrKafkaStorageError: + return "kafka server: Disk error when trying to access log file on the disk." + case ErrLogDirNotFound: + return "kafka server: The specified log directory is not found in the broker config." + case ErrSASLAuthenticationFailed: + return "kafka server: SASL Authentication failed." + case ErrUnknownProducerID: + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + case ErrReassignmentInProgress: + return "kafka server: A partition reassignment is in progress." } return fmt.Sprintf("Unknown error, how did this happen? 
Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/examples/README.md b/vendor/github.com/Shopify/sarama/examples/README.md new file mode 100644 index 00000000000..85fecefd8dc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/examples/README.md @@ -0,0 +1,9 @@ +# Sarama examples + +This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama). + +In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version. + +#### HTTP server + +[http_server](./http_server) is a simple HTTP server that uses both the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both. diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/README.md b/vendor/github.com/Shopify/sarama/examples/http_server/README.md new file mode 100644 index 00000000000..5ff2bc2533b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/examples/http_server/README.md @@ -0,0 +1,7 @@ +# HTTP server example + +This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will send an access log entry to Kafka as well in the background. + +If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response while the message is being produced in the background. + +One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.
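To make the shared-producer point concrete, here is a hedged, minimal sketch of one `SyncProducer` serving every request goroutine (editor's example; the broker address, topic name, and port are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // the SyncProducer requires this

	// One producer shared by all request goroutines; SyncProducer is thread-safe,
	// and sharing it lets sarama batch messages from concurrent requests.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}
	defer producer.Close()

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: "example-topic", // placeholder topic
			Value: sarama.StringEncoder(r.URL.RawQuery),
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```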
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go new file mode 100644 index 00000000000..b6d83c5dc9d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go @@ -0,0 +1,247 @@ +package main + +import ( + "github.com/Shopify/sarama" + + "crypto/tls" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" +) + +var ( + addr = flag.String("addr", ":8080", "The address to bind to") + brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") + verbose = flag.Bool("verbose", false, "Turn on Sarama logging") + certFile = flag.String("certificate", "", "The optional certificate file for client authentication") + keyFile = flag.String("key", "", "The optional key file for client authentication") + caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") + verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain") +) + +func main() { + flag.Parse() + + if *verbose { + sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) + } + + if *brokers == "" { + flag.PrintDefaults() + os.Exit(1) + } + + brokerList := strings.Split(*brokers, ",") + log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", ")) + + server := &Server{ + DataCollector: newDataCollector(brokerList), + AccessLogProducer: newAccessLogProducer(brokerList), + } + defer func() { + if err := server.Close(); err != nil { + log.Println("Failed to close server", err) + } + }() + + log.Fatal(server.Run(*addr)) +} + +func createTlsConfiguration() (t *tls.Config) { + if *certFile != "" && *keyFile != "" && *caFile != "" { + cert, err := tls.LoadX509KeyPair(*certFile, *keyFile) + if err != nil { + log.Fatal(err) + } + + caCert, err := ioutil.ReadFile(*caFile) + if err != nil { + log.Fatal(err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + t = &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + InsecureSkipVerify: *verifySsl, + } + } + // will be nil by default if nothing is provided + return t +} + +type Server struct { + DataCollector sarama.SyncProducer + AccessLogProducer sarama.AsyncProducer +} + +func (s *Server) Close() error { + if err := s.DataCollector.Close(); err != nil { + log.Println("Failed to shut down data collector cleanly", err) + } + + if err := s.AccessLogProducer.Close(); err != nil { + log.Println("Failed to shut down access log producer cleanly", err) + } + + return nil +} + +func (s *Server) Handler() http.Handler { + return s.withAccessLog(s.collectQueryStringData()) +} + +func (s *Server) Run(addr string) error { + httpServer := &http.Server{ + Addr: addr, + Handler: s.Handler(), + } + + log.Printf("Listening for requests on %s...\n", addr) + return httpServer.ListenAndServe() +} + +func (s *Server) collectQueryStringData() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + // We are not setting a message key, which means that all messages will + // be distributed randomly over the different partitions. 
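+		// Editor's note: to route related messages to one partition instead, a
+		// key could be set here, e.g. Key: sarama.StringEncoder(r.RemoteAddr),
+		// as the access-log producer in withAccessLog does below.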
+ partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{ + Topic: "important", + Value: sarama.StringEncoder(r.URL.RawQuery), + }) + + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Failed to store your data: %s", err) + } else { + // The tuple (topic, partition, offset) can be used as a unique identifier + // for a message in a Kafka cluster. + fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset) + } + }) +} + +type accessLogEntry struct { + Method string `json:"method"` + Host string `json:"host"` + Path string `json:"path"` + IP string `json:"ip"` + ResponseTime float64 `json:"response_time"` + + encoded []byte + err error +} + +func (ale *accessLogEntry) ensureEncoded() { + if ale.encoded == nil && ale.err == nil { + ale.encoded, ale.err = json.Marshal(ale) + } +} + +func (ale *accessLogEntry) Length() int { + ale.ensureEncoded() + return len(ale.encoded) +} + +func (ale *accessLogEntry) Encode() ([]byte, error) { + ale.ensureEncoded() + return ale.encoded, ale.err +} + +func (s *Server) withAccessLog(next http.Handler) http.Handler { + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + started := time.Now() + + next.ServeHTTP(w, r) + + entry := &accessLogEntry{ + Method: r.Method, + Host: r.Host, + Path: r.RequestURI, + IP: r.RemoteAddr, + ResponseTime: float64(time.Since(started)) / float64(time.Second), + } + + // We will use the client's IP address as key. This will cause + // all the access log entries of the same IP address to end up + // on the same partition. + s.AccessLogProducer.Input() <- &sarama.ProducerMessage{ + Topic: "access_log", + Key: sarama.StringEncoder(r.RemoteAddr), + Value: entry, + } + }) +} + +func newDataCollector(brokerList []string) sarama.SyncProducer { + + // For the data collector, we are looking for strong consistency semantics. + // Because we don't change the flush settings, sarama will try to produce messages + // as fast as possible to keep latency low. + config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message + config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message + config.Producer.Return.Successes = true + tlsConfig := createTlsConfiguration() + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + // On the broker side, you may want to change the following settings to get + // stronger consistency guarantees: + // - For your broker, set `unclean.leader.election.enable` to false + // - For the topic, you could increase `min.insync.replicas`. + + producer, err := sarama.NewSyncProducer(brokerList, config) + if err != nil { + log.Fatalln("Failed to start Sarama producer:", err) + } + + return producer +} + +func newAccessLogProducer(brokerList []string) sarama.AsyncProducer { + + // For the access log, we are looking for AP semantics, with high throughput. + // By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
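+	// Editor's note: besides Flush.Frequency (set below), sarama also exposes
+	// Producer.Flush.Bytes, Flush.Messages, and Flush.MaxMessages for tuning
+	// batch sizes; this example leaves those at their defaults.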
+ config := sarama.NewConfig() + tlsConfig := createTlsConfiguration() + if tlsConfig != nil { + config.Net.TLS.Enable = true + config.Net.TLS.Config = tlsConfig + } + config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack + config.Producer.Compression = sarama.CompressionSnappy // Compress messages + config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms + + producer, err := sarama.NewAsyncProducer(brokerList, config) + if err != nil { + log.Fatalln("Failed to start Sarama producer:", err) + } + + // We will just log to STDOUT if we're not able to produce messages. + // Note: messages will only be returned here after all retry attempts are exhausted. + go func() { + for err := range producer.Errors() { + log.Println("Failed to write access log entry:", err) + } + }() + + return producer +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index ab817a06eee..8c8e3a5afc8 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -21,17 +21,35 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { return nil } +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { MaxWaitTime int32 MinBytes int32 + MaxBytes int32 Version int16 + Isolation IsolationLevel blocks map[string]map[int32]*fetchRequestBlock } +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = 0 + ReadCommitted IsolationLevel = 1 +) + func (r *FetchRequest) encode(pe packetEncoder) (err error) { pe.putInt32(-1) // replica ID is always -1 for clients pe.putInt32(r.MaxWaitTime) pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } err = pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -67,6 +85,18 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { if r.MinBytes, err = pd.getInt32(); err != nil { return err } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } topicCount, err := pd.getArrayLength() if err != nil { return err @@ -114,6 +144,10 @@ func (r *FetchRequest) requiredVersion() KafkaVersion { return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + return V0_11_0_0 default: return minVersion } diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index b56b166c282..0e81ad89f43 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,14 +1,44 @@ package sarama -import "time" +import ( + "time" +) + +type AbortedTransaction struct { + ProducerID int64 + FirstOffset int64 +} + +func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if t.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + return nil +} + +func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { 
+ pe.putInt64(t.ProducerID) + pe.putInt64(t.FirstOffset) + + return nil +} type FetchResponseBlock struct { Err KError HighWaterMarkOffset int64 - MsgSet MessageSet + LastStableOffset int64 + AbortedTransactions []*AbortedTransaction + Records *Records // deprecated: use FetchResponseBlock.Records + RecordsSet []*Records + Partial bool } -func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { +func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err @@ -20,29 +50,126 @@ func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { return err } - msgSetSize, err := pd.getInt32() + if version >= 4 { + b.LastStableOffset, err = pd.getInt64() + if err != nil { + return err + } + + numTransact, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTransact >= 0 { + b.AbortedTransactions = make([]*AbortedTransaction, numTransact) + } + + for i := 0; i < numTransact; i++ { + transact := new(AbortedTransaction) + if err = transact.decode(pd); err != nil { + return err + } + b.AbortedTransactions[i] = transact + } + } + + recordsSize, err := pd.getInt32() if err != nil { return err } - msgSetDecoder, err := pd.getSubset(int(msgSetSize)) + recordsDecoder, err := pd.getSubset(int(recordsSize)) if err != nil { return err } - err = (&b.MsgSet).decode(msgSetDecoder) - return err + b.RecordsSet = []*Records{} + + for recordsDecoder.remaining() > 0 { + records := &Records{} + if err := records.decode(recordsDecoder); err != nil { + // If we have at least one decoded records, this is not an error + if err == ErrInsufficientData { + if len(b.RecordsSet) == 0 { + b.Partial = true + } + break + } + return err + } + + partial, err := records.isPartial() + if err != nil { + return err + } + + // If we have at least one full records, we skip incomplete ones + if partial && len(b.RecordsSet) > 0 { + break + } + + b.RecordsSet = append(b.RecordsSet, records) + + if b.Records == nil { + b.Records = records + } + } + + return nil } -func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) { +func (b *FetchResponseBlock) numRecords() (int, error) { + sum := 0 + + for _, records := range b.RecordsSet { + count, err := records.numRecords() + if err != nil { + return 0, err + } + + sum += count + } + + return sum, nil +} + +func (b *FetchResponseBlock) isPartial() (bool, error) { + if b.Partial { + return true, nil + } + + if len(b.RecordsSet) == 1 { + return b.RecordsSet[0].isPartial() + } + + return false, nil +} + +func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(b.Err)) pe.putInt64(b.HighWaterMarkOffset) + if version >= 4 { + pe.putInt64(b.LastStableOffset) + + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { + return err + } + for _, transact := range b.AbortedTransactions { + if err = transact.encode(pe); err != nil { + return err + } + } + } + pe.push(&lengthField{}) - err = b.MsgSet.encode(pe) - if err != nil { - return err + for _, records := range b.RecordsSet { + err = records.encode(pe) + if err != nil { + return err + } } return pe.pop() } @@ -90,7 +217,7 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { } block := new(FetchResponseBlock) - err = block.decode(pd) + err = block.decode(pd, version) if err != nil { return err } @@ -124,7 +251,7 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { for id, block := range partitions { pe.putInt32(id) - err = block.encode(pe) + 
err = block.encode(pe, r.Version) if err != nil { return err } @@ -148,6 +275,10 @@ func (r *FetchResponse) requiredVersion() KafkaVersion { return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + return V0_11_0_0 default: return minVersion } @@ -182,7 +313,7 @@ func (r *FetchResponse) AddError(topic string, partition int32, err KError) { frb.Err = err } -func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { +func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*FetchResponseBlock) } @@ -196,6 +327,11 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc frb = new(FetchResponseBlock) partitions[partition] = frb } + + return frb +} + +func encodeKV(key, value Encoder) ([]byte, []byte) { var kb []byte var vb []byte if key != nil { @@ -204,7 +340,46 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc if value != nil { vb, _ = value.Encode() } + + return kb, vb +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) msg := &Message{Key: kb, Value: vb} msgBlock := &MessageBlock{Msg: msg, Offset: offset} - frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) + if len(frb.RecordsSet) == 0 { + records := newLegacyRecords(&MessageSet{}) + frb.RecordsSet = []*Records{&records} + } + set := frb.RecordsSet[0].msgSet + set.Messages = append(set.Messages, msgBlock) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].recordBatch + batch.addRecord(rec) +} + +func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { + frb := r.getOrCreateBlock(topic, partition) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].recordBatch + batch.LastOffsetDelta = offset +} + +func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + frb.LastStableOffset = offset } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go new file mode 100644 index 00000000000..8ceb6c23255 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -0,0 +1,43 @@ +package sarama + +import "time" + +type InitProducerIDRequest struct { + TransactionalID *string + TransactionTimeout time.Duration +} + +func (i *InitProducerIDRequest) encode(pe packetEncoder) error { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + + return nil +} + +func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + i.TransactionTimeout = 
time.Duration(timeout) * time.Millisecond + + return nil +} + +func (i *InitProducerIDRequest) key() int16 { + return 22 +} + +func (i *InitProducerIDRequest) version() int16 { + return 0 +} + +func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go new file mode 100644 index 00000000000..1b32eb085b2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -0,0 +1,55 @@ +package sarama + +import "time" + +type InitProducerIDResponse struct { + ThrottleTime time.Duration + Err KError + ProducerID int64 + ProducerEpoch int16 +} + +func (i *InitProducerIDResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(i.Err)) + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + + return nil +} + +func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + i.Err = KError(kerr) + + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +func (i *InitProducerIDResponse) key() int16 { + return 22 +} + +func (i *InitProducerIDResponse) version() int16 { + return 0 +} + +func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go index 70078be5d9f..576b1a6f6f8 100644 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -27,3 +27,43 @@ func (l *lengthField) check(curOffset int, buf []byte) error { return nil } + +type varintLengthField struct { + startOffset int + length int64 +} + +func (l *varintLengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getVarint() + return err +} + +func (l *varintLengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *varintLengthField) adjustLength(currOffset int) int { + oldFieldSize := l.reserveLength() + l.length = int64(currOffset - l.startOffset - oldFieldSize) + + return l.reserveLength() - oldFieldSize +} + +func (l *varintLengthField) reserveLength() int { + var tmp [binary.MaxVarintLen64]byte + return binary.PutVarint(tmp[:], l.length) +} + +func (l *varintLengthField) run(curOffset int, buf []byte) error { + binary.PutVarint(buf[l.startOffset:], l.length) + return nil +} + +func (l *varintLengthField) check(curOffset int, buf []byte) error { + if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index 327c5fa2add..bd5650bbc07 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -37,7 +37,7 @@ type Message struct { } func (m *Message) encode(pe packetEncoder) error { - pe.push(&crc32Field{}) + pe.push(newCRC32Field(crcIEEE)) pe.putInt8(m.Version) @@ -45,7 +45,9 @@ func (m *Message) encode(pe packetEncoder) error { pe.putInt8(attributes) if m.Version >= 1 { - 
pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond)) + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } } err := pe.putBytes(m.Key) @@ -104,7 +106,7 @@ func (m *Message) encode(pe packetEncoder) error { } func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(&crc32Field{}) + err = pd.push(newCRC32Field(crcIEEE)) if err != nil { return err } @@ -114,18 +116,20 @@ func (m *Message) decode(pd packetDecoder) (err error) { return err } + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} + } + attribute, err := pd.getInt8() if err != nil { return err } m.Codec = CompressionCodec(attribute & compressionCodecMask) - if m.Version >= 1 { - millis, err := pd.getInt64() - if err != nil { + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { return err } - m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) } m.Key, err = pd.getBytes() diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go index f028784e51a..27db52fdf1f 100644 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -64,6 +64,19 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { ms.Messages = nil for pd.remaining() > 0 { + magic, err := magicValue(pd) + if err != nil { + if err == ErrInsufficientData { + ms.PartialTrailingMessage = true + return nil + } + return err + } + + if magic > 1 { + return nil + } + msb := new(MessageBlock) err = msb.decode(pd) switch err { diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index 0734d34f6cb..55ef1e2920f 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -288,6 +288,15 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { // NewMockBrokerAddr behaves like newMockBroker but listens on the address you give // it rather than just some ephemeral port. func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { + listener, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + return NewMockBrokerListener(t, brokerID, listener) +} + +// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified. 
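NewMockBrokerListener, declared below, lets the test own the listener's lifecycle. A minimal sketch of how a test might use it (hypothetical fragment, assuming a *testing.T named t and "net" imported):

```go
// Bind the listener yourself (here: an ephemeral localhost port),
// then hand it to the mock broker instead of an address string.
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
    t.Fatal(err)
}
broker := sarama.NewMockBrokerListener(t, 1, ln)
defer broker.Close()
```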
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { var err error broker := &MockBroker{ @@ -296,13 +305,10 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker t: t, brokerID: brokerID, expectations: make(chan encoder, 512), + listener: listener, } broker.handler = broker.defaultRequestHandler - broker.listener, err = net.Listen("tcp", addr) - if err != nil { - t.Fatal(err) - } Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) if err != nil { diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index a203142094d..f79a9d5e9b4 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -122,6 +122,7 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { type MockOffsetResponse struct { offsets map[string]map[int32]map[int64]int64 t TestReporter + version int16 } func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { @@ -131,6 +132,11 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { } } +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { partitions := mor.offsets[topic] if partitions == nil { @@ -148,7 +154,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{} + offsetResponse := &OffsetResponse{Version: mor.version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { offset := mor.getOffset(topic, partition, block.time) @@ -180,6 +186,7 @@ type MockFetchResponse struct { highWaterMarks map[string]map[int32]int64 t TestReporter batchSize int + version int16 } func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { @@ -191,6 +198,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { } } +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { partitions := mfr.messages[topic] if partitions == nil { @@ -218,7 +230,9 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { fetchRequest := reqBody.(*FetchRequest) - res := &FetchResponse{} + res := &FetchResponse{ + Version: mfr.version, + } for topic, partitions := range fetchRequest.blocks { for partition, block := range partitions { initialOffset := block.fetchOffset @@ -370,14 +384,20 @@ func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int3 // MockProduceResponse is a `ProduceResponse` builder. 
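The SetVersion setters added throughout this file let a test pin the protocol version of canned responses, which matters now that fetch and produce encodings differ across versions. A plausible sketch of wiring one into a mock broker (hypothetical test fragment):

```go
// Serve version-4 fetch responses so the new record-batch decode path
// is exercised; SetVersion and SetMessage chain on the same builder.
broker.SetHandlerByMap(map[string]sarama.MockResponse{
    "FetchRequest": sarama.NewMockFetchResponse(t, 1).
        SetVersion(4).
        SetMessage("logs", 0, 0, sarama.StringEncoder("hello")),
})
```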
type MockProduceResponse struct {
-	errors map[string]map[int32]KError
-	t      TestReporter
+	version int16
+	errors  map[string]map[int32]KError
+	t       TestReporter
}

func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
	return &MockProduceResponse{t: t}
}

+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+	mr.version = version
+	return mr
+}
+
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
	if mr.errors == nil {
		mr.errors = make(map[string]map[int32]KError)
@@ -393,8 +413,10 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
	req := reqBody.(*ProduceRequest)
-	res := &ProduceResponse{}
-	for topic, partitions := range req.msgSets {
+	res := &ProduceResponse{
+		Version: mr.version,
+	}
+	for topic, partitions := range req.records {
		for partition := range partitions {
			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
		}
diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md
new file mode 100644
index 00000000000..55a6c2e61cd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/README.md
@@ -0,0 +1,13 @@
+# sarama/mocks
+
+The `mocks` subpackage includes mock implementations of the interfaces of the major sarama types.
+You can use them to test your sarama applications using dependency injection.
+
+The following mock objects are available:
+
+- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
+- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
+- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
+
+The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
+and the results will be reported to the `*testing.T` object you provided when creating the mock.
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
new file mode 100644
index 00000000000..24ae5c0d581
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
@@ -0,0 +1,174 @@
+package mocks
+
+import (
+	"sync"
+
+	"github.com/Shopify/sarama"
+)
+
+// AsyncProducer implements sarama's Producer interface for testing purposes.
+// Before you can send messages to its Input channel, you have to set expectations
+// so it knows how to handle the input; it returns an error if the number of messages
+// received is bigger than the number of expectations set. You can also set a
+// function in each expectation so that the message value is checked by this function
+// and an error is returned if the match fails.
+type AsyncProducer struct {
+	l            sync.Mutex
+	t            ErrorReporter
+	expectations []*producerExpectation
+	closed       chan struct{}
+	input        chan *sarama.ProducerMessage
+	successes    chan *sarama.ProducerMessage
+	errors       chan *sarama.ProducerError
+	lastOffset   int64
+}
+
+// NewAsyncProducer instantiates a new Producer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is used to determine whether it
+// should ack successes on the Successes channel.
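Before the constructor below, here is how the async mock is typically driven end to end (a minimal sketch; the package and test names are illustrative):

```go
package example_test

import (
    "testing"

    "github.com/Shopify/sarama"
    "github.com/Shopify/sarama/mocks"
)

func TestPublish(t *testing.T) {
    config := sarama.NewConfig()
    config.Producer.Return.Successes = true // so the mock acks on Successes

    mp := mocks.NewAsyncProducer(t, config)
    mp.ExpectInputAndSucceed()

    mp.Input() <- &sarama.ProducerMessage{Topic: "t", Value: sarama.StringEncoder("v")}
    <-mp.Successes()

    // Close also verifies that every expectation was consumed.
    if err := mp.Close(); err != nil {
        t.Fatal(err)
    }
}
```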
+func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
+	if config == nil {
+		config = sarama.NewConfig()
+	}
+	mp := &AsyncProducer{
+		t:            t,
+		closed:       make(chan struct{}, 0),
+		expectations: make([]*producerExpectation, 0),
+		input:        make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+		successes:    make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+		errors:       make(chan *sarama.ProducerError, config.ChannelBufferSize),
+	}
+
+	go func() {
+		defer func() {
+			close(mp.successes)
+			close(mp.errors)
+		}()
+
+		for msg := range mp.input {
+			mp.l.Lock()
+			if mp.expectations == nil || len(mp.expectations) == 0 {
+				mp.expectations = nil
+				mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+			} else {
+				expectation := mp.expectations[0]
+				mp.expectations = mp.expectations[1:]
+				if expectation.CheckFunction != nil {
+					if val, err := msg.Value.Encode(); err != nil {
+						mp.t.Errorf("Input message encoding failed: %s", err.Error())
+						mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
+					} else {
+						err = expectation.CheckFunction(val)
+						if err != nil {
+							mp.t.Errorf("Check function returned an error: %s", err.Error())
+							mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
+						}
+					}
+				}
+				if expectation.Result == errProduceSuccess {
+					mp.lastOffset++
+					if config.Producer.Return.Successes {
+						msg.Offset = mp.lastOffset
+						mp.successes <- msg
+					}
+				} else {
+					if config.Producer.Return.Errors {
+						mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
+					}
+				}
+			}
+			mp.l.Unlock()
+		}
+
+		mp.l.Lock()
+		if len(mp.expectations) > 0 {
+			mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
+		}
+		mp.l.Unlock()
+
+		close(mp.closed)
+	}()
+
+	return mp
+}
+
+////////////////////////////////////////////////
+// Implement Producer interface
+////////////////////////////////////////////////
+
+// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) AsyncClose() {
+	close(mp.input)
+}
+
+// Close corresponds with the Close method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) Close() error {
+	mp.AsyncClose()
+	<-mp.closed
+	return nil
+}
+
+// Input corresponds with the Input method of sarama's Producer implementation.
+// You have to set expectations on the mock producer before writing messages to the Input
+// channel, so it knows how to handle them. If there are no remaining expectations and
+// a message is written to the Input channel, the mock producer will write an error to the test
+// state object.
+func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
+	return mp.input
+}
+
+// Successes corresponds with the Successes method of sarama's Producer implementation.
+func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
+	return mp.successes
+}
+
+// Errors corresponds with the Errors method of sarama's Producer implementation.
+func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
+	return mp.errors
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
+// will be provided on the input channel. The mock producer will call the given function to check
+// the message value. If an error is returned it will be made available on the Errors channel;
+// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
+// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
+	mp.l.Lock()
+	defer mp.l.Unlock()
+	mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
+}
+
+// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
+// will be provided on the input channel. The mock producer will first call the given function to
+// check the message value. If an error is returned it will be made available on the Errors channel;
+// otherwise the mock will handle the message as if it failed to produce successfully. This means
+// it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
+	mp.l.Lock()
+	defer mp.l.Unlock()
+	mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
+}
+
+// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it is produced successfully,
+// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
+// is set to true.
+func (mp *AsyncProducer) ExpectInputAndSucceed() {
+	mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
+}
+
+// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it failed to produce
+// successfully. This means it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputAndFail(err error) {
+	mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go
new file mode 100644
index 00000000000..003d4d3e281
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/consumer.go
@@ -0,0 +1,315 @@
+package mocks
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"github.com/Shopify/sarama"
+)
+
+// Consumer implements sarama's Consumer interface for testing purposes.
+// Before you can start consuming from this consumer, you have to register
+// topic/partitions using ExpectConsumePartition, and set expectations on them.
+type Consumer struct {
+	l                  sync.Mutex
+	t                  ErrorReporter
+	config             *sarama.Config
+	partitionConsumers map[string]map[int32]*PartitionConsumer
+	metadata           map[string][]int32
+}
+
+// NewConsumer returns a new mock Consumer instance. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument can be set to nil.
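A matching sketch for the consumer mock (illustrative test fragment, assuming a *testing.T named t):

```go
consumer := mocks.NewConsumer(t, nil)
pc := consumer.ExpectConsumePartition("logs", 0, sarama.OffsetOldest)
pc.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})

partCons, err := consumer.ConsumePartition("logs", 0, sarama.OffsetOldest)
if err != nil {
    t.Fatal(err)
}
if msg := <-partCons.Messages(); string(msg.Value) != "hello" {
    t.Fatalf("unexpected message: %q", msg.Value)
}
```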
+func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
+	if config == nil {
+		config = sarama.NewConfig()
+	}
+
+	c := &Consumer{
+		t:                  t,
+		config:             config,
+		partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
+	}
+	return c
+}
+
+///////////////////////////////////////////////////
+// Consumer interface implementation
+///////////////////////////////////////////////////
+
+// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
+// Before you can start consuming a partition, you have to set expectations on it using
+// ExpectConsumePartition. You can only consume a partition once per consumer.
+func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
+		c.t.Errorf("No expectations set for %s/%d", topic, partition)
+		return nil, errOutOfExpectations
+	}
+
+	pc := c.partitionConsumers[topic][partition]
+	if pc.consumed {
+		return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
+	}
+
+	if pc.offset != AnyOffset && pc.offset != offset {
+		c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
+	}
+
+	pc.consumed = true
+	return pc, nil
+}
+
+// Topics returns a list of topics, as registered with SetTopicMetadata.
+func (c *Consumer) Topics() ([]string, error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.metadata == nil {
+		c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.")
+		return nil, sarama.ErrOutOfBrokers
+	}
+
+	var result []string
+	for topic := range c.metadata {
+		result = append(result, topic)
+	}
+	return result, nil
+}
+
+// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata.
+func (c *Consumer) Partitions(topic string) ([]int32, error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.metadata == nil {
+		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.")
+		return nil, sarama.ErrOutOfBrokers
+	}
+	if c.metadata[topic] == nil {
+		return nil, sarama.ErrUnknownTopicOrPartition
+	}
+
+	return c.metadata[topic], nil
+}
+
+func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
+	for topic, partitionConsumers := range c.partitionConsumers {
+		hwm := make(map[int32]int64, len(partitionConsumers))
+		for partition, pc := range partitionConsumers {
+			hwm[partition] = pc.HighWaterMarkOffset()
+		}
+		hwms[topic] = hwm
+	}
+
+	return hwms
+}
+
+// Close implements the Close method from the sarama.Consumer interface. It will close
+// all registered PartitionConsumer instances.
+func (c *Consumer) Close() error {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	for _, partitions := range c.partitionConsumers {
+		for _, partitionConsumer := range partitions {
+			_ = partitionConsumer.Close()
+		}
+	}
+
+	return nil
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// SetTopicMetadata sets the cluster's topic/partition metadata,
+// which will be returned by Topics() and Partitions().
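Topics and Partitions only answer from data seeded via SetTopicMetadata, whose setter follows below. A short illustrative fragment:

```go
// Seed the metadata the mock will serve; without this, Topics and
// Partitions report an expectation violation and ErrOutOfBrokers.
consumer.SetTopicMetadata(map[string][]int32{
    "logs": {0, 1, 2},
})
topics, _ := consumer.Topics()          // ["logs"]
parts, _ := consumer.Partitions("logs") // [0, 1, 2]
```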
+func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	c.metadata = metadata
+}
+
+// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
+// The registered PartitionConsumer will be returned, so you can set expectations
+// on it using method chaining. Once a topic/partition is registered, you are
+// expected to start consuming it using ConsumePartition. If that doesn't happen,
+// an error will be written to the error reporter once the mock consumer is closed. It will
+// also expect that the offset provided when calling ConsumePartition matches the offset
+// registered here.
+func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.partitionConsumers[topic] == nil {
+		c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
+	}
+
+	if c.partitionConsumers[topic][partition] == nil {
+		c.partitionConsumers[topic][partition] = &PartitionConsumer{
+			t:         c.t,
+			topic:     topic,
+			partition: partition,
+			offset:    offset,
+			messages:  make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
+			errors:    make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
+		}
+	}
+
+	return c.partitionConsumers[topic][partition]
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer mock type
+///////////////////////////////////////////////////
+
+// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
+// It is returned by the mock Consumer's ConsumePartition method, but only if it was
+// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
+// Errors and Messages channels, you should specify what values will be provided on these
+// channels using YieldMessage and YieldError.
+type PartitionConsumer struct {
+	highWaterMarkOffset     int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	l                       sync.Mutex
+	t                       ErrorReporter
+	topic                   string
+	partition               int32
+	offset                  int64
+	messages                chan *sarama.ConsumerMessage
+	errors                  chan *sarama.ConsumerError
+	singleClose             sync.Once
+	consumed                bool
+	errorsShouldBeDrained   bool
+	messagesShouldBeDrained bool
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer interface implementation
+///////////////////////////////////////////////////
+
+// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) AsyncClose() {
+	pc.singleClose.Do(func() {
+		close(pc.messages)
+		close(pc.errors)
+	})
+}
+
+// Close implements the Close method from the sarama.PartitionConsumer interface. It will
+// verify whether the partition consumer was actually started.
+func (pc *PartitionConsumer) Close() error {
+	if !pc.consumed {
+		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
+		return errPartitionConsumerNotStarted
+	}
+
+	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
+		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
+	}
+
+	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
+		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
+	}
+
+	pc.AsyncClose()
+
+	var (
+		closeErr error
+		wg       sync.WaitGroup
+	)
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		var errs = make(sarama.ConsumerErrors, 0)
+		for err := range pc.errors {
+			errs = append(errs, err)
+		}
+
+		if len(errs) > 0 {
+			closeErr = errs
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for range pc.messages {
+			// drain
+		}
+	}()
+
+	wg.Wait()
+	return closeErr
+}
+
+// Errors implements the Errors method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
+	return pc.errors
+}
+
+// Messages implements the Messages method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
+	return pc.messages
+}
+
+func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
+	return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// YieldMessage will yield a message on the Messages channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this
+// message was consumed from the Messages channel, because there are legitimate
+// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
+// verify that the channel is empty on close.
+func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
+	pc.l.Lock()
+	defer pc.l.Unlock()
+
+	msg.Topic = pc.topic
+	msg.Partition = pc.partition
+	msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
+
+	pc.messages <- msg
+}
+
+// YieldError will yield an error on the Errors channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this error was
+// consumed from the Errors channel, because there are legitimate reasons for this
+// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
+// the channel is empty on close.
+func (pc *PartitionConsumer) YieldError(err error) {
+	pc.errors <- &sarama.ConsumerError{
+		Topic:     pc.topic,
+		Partition: pc.partition,
+		Err:       err,
+	}
+}
+
+// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
+// that the messages channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
+	pc.messagesShouldBeDrained = true
+}
+
+// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
+// that the errors channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
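Together with ExpectMessagesDrainedOnClose above and ExpectErrorsDrainedOnClose below, a test can assert it consumed everything it staged. A sketch continuing the earlier consumer fragment (illustrative):

```go
pc.ExpectMessagesDrainedOnClose()
pc.YieldMessage(&sarama.ConsumerMessage{Value: []byte("must be read")})

<-pc.Messages() // drain; otherwise Close reports an expectation violation
if err := pc.Close(); err != nil {
    t.Fatal(err)
}
```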
+func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
+	pc.errorsShouldBeDrained = true
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go
new file mode 100644
index 00000000000..4adb838d996
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/mocks.go
@@ -0,0 +1,48 @@
+/*
+Package mocks provides mocks that can be used for testing applications
+that use Sarama. The mock types provided by this package implement the
+interfaces Sarama exports, so you can use them for dependency injection
+in your tests.
+
+All mock instances require you to set expectations on them before you
+can use them. These expectations determine how the mock will behave. If an
+expectation is not met, it will make your test fail.
+
+NOTE: this package currently does not fall under the API stability
+guarantee of Sarama as it is still considered experimental.
+*/
+package mocks
+
+import (
+	"errors"
+
+	"github.com/Shopify/sarama"
+)
+
+// ErrorReporter is a simple interface that includes the testing.T methods we use to report
+// expectation violations when using the mock objects.
+type ErrorReporter interface {
+	Errorf(string, ...interface{})
+}
+
+// ValueChecker is a function type to be set in each expectation of the producer mocks
+// to check the value passed.
+type ValueChecker func(val []byte) error
+
+var (
+	errProduceSuccess              error = nil
+	errOutOfExpectations           = errors.New("No more expectations set on mock")
+	errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
+)
+
+const AnyOffset int64 = -1000
+
+type producerExpectation struct {
+	Result        error
+	CheckFunction ValueChecker
+}
+
+type consumerExpectation struct {
+	Err error
+	Msg *sarama.ConsumerMessage
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
new file mode 100644
index 00000000000..3f4986e2f8c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
@@ -0,0 +1,157 @@
+package mocks
+
+import (
+	"sync"
+
+	"github.com/Shopify/sarama"
+)
+
+// SyncProducer implements sarama's SyncProducer interface for testing purposes.
+// Before you can use it, you have to set expectations on the mock SyncProducer
+// to tell it how to handle calls to SendMessage, so you can easily test success
+// and failure scenarios.
+type SyncProducer struct {
+	l            sync.Mutex
+	t            ErrorReporter
+	expectations []*producerExpectation
+	lastOffset   int64
+}
+
+// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is currently unused, but is
+// maintained to be compatible with the async Producer.
+func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
+	return &SyncProducer{
+		t:            t,
+		expectations: make([]*producerExpectation, 0),
+	}
+}
+
+////////////////////////////////////////////////
+// Implement SyncProducer interface
+////////////////////////////////////////////////
+
+// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessage, so it knows
+// how to handle them. You can set a function in each expectation so that the message value
+// is checked by this function and an error is returned if the match fails.
+// If there are no remaining expectations when SendMessage is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+
+	if len(sp.expectations) > 0 {
+		expectation := sp.expectations[0]
+		sp.expectations = sp.expectations[1:]
+		if expectation.CheckFunction != nil {
+			val, err := msg.Value.Encode()
+			if err != nil {
+				sp.t.Errorf("Input message encoding failed: %s", err.Error())
+				return -1, -1, err
+			}
+
+			errCheck := expectation.CheckFunction(val)
+			if errCheck != nil {
+				sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
+				return -1, -1, errCheck
+			}
+		}
+		if expectation.Result == errProduceSuccess {
+			sp.lastOffset++
+			msg.Offset = sp.lastOffset
+			return 0, msg.Offset, nil
+		}
+		return -1, -1, expectation.Result
+	}
+	sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+	return -1, -1, errOutOfExpectations
+}
+
+// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessages, so it knows
+// how to handle them. If there are not enough remaining expectations when SendMessages is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+
+	if len(sp.expectations) >= len(msgs) {
+		expectations := sp.expectations[0:len(msgs)]
+		sp.expectations = sp.expectations[len(msgs):]
+
+		for i, expectation := range expectations {
+			if expectation.CheckFunction != nil {
+				val, err := msgs[i].Value.Encode()
+				if err != nil {
+					sp.t.Errorf("Input message encoding failed: %s", err.Error())
+					return err
+				}
+				errCheck := expectation.CheckFunction(val)
+				if errCheck != nil {
+					sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
+					return errCheck
+				}
+			}
+			if expectation.Result != errProduceSuccess {
+				return expectation.Result
+			}
+		}
+		return nil
+	}
+	sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
+	return errOutOfExpectations
+}
+
+// Close corresponds with the Close method of sarama's SyncProducer implementation.
+// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
+// so it will write an error to the test state if there are any remaining expectations.
+func (sp *SyncProducer) Close() error {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+
+	if len(sp.expectations) > 0 {
+		sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
+	}
+
+	return nil
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
+// will be called. The mock producer will first call the given function to check the message value.
+// It will cascade the error of the function, if any, or handle the message as if it produced
+// successfully, i.e. by returning a valid partition and offset, and a nil error.
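A compact sketch of the sync mock with a checker function (illustrative test fragment; assumes "errors" is imported and a *testing.T named t):

```go
sp := mocks.NewSyncProducer(t, nil)
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
    if len(val) == 0 {
        return errors.New("empty payload") // surfaces as the SendMessage error
    }
    return nil
})

partition, offset, err := sp.SendMessage(&sarama.ProducerMessage{
    Topic: "t",
    Value: sarama.StringEncoder("v"),
})
if err != nil {
    t.Fatal(err)
}
t.Logf("stored at %d/%d", partition, offset)
```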
+func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
+}
+
+// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will first call the given function to check the message value.
+// It will cascade the error of the function, if any, or handle the message as if it failed
+// to produce successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+	sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
+}
+
+// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it produced successfully, i.e. by
+// returning a valid partition and offset, and a nil error.
+func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
+	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
+}
+
+// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it failed to produce
+// successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
+	sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
index 5e15cdafe3a..6c01f959e99 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -151,6 +151,13 @@ type PartitionOffsetManager interface {
	// message twice, and your processing should ideally be idempotent.
	MarkOffset(offset int64, metadata string)

+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, where MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
	// Errors returns a read channel of errors that occur during offset management, if
	// enabled. By default, errors are logged and not returned over this channel. If
	// you want to implement any custom error handling, set your config's
@@ -329,6 +336,17 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
	}
}

+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset <= pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
	pom.lock.Lock()
	defer pom.lock.Unlock()
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
index 28670c0e625..74805ccbf53 100644
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -9,11 +9,16 @@ type packetDecoder interface {
	getInt16() (int16, error)
	getInt32() (int32, error)
	getInt64() (int64, error)
+	getVarint() (int64, error)
	getArrayLength() (int, error)
+	getBool() (bool, error)

	// Collections
	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
	getString() (string, error)
+	getNullableString() (*string, error)
	getInt32Array() ([]int32, error)
	getInt64Array() ([]int64, error)
	getStringArray() ([]string, error)
@@ -21,6 +26,7 @@ type packetDecoder interface {
	// Subsets
	remaining() int
	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset

	// Stacks, see PushDecoder
	push(in pushDecoder) error
@@ -43,3 +49,12 @@ type pushDecoder interface {
	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
	check(curOffset int, buf []byte) error
}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance varint encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
index 27a10f6d44b..67b8daed829 100644
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -11,12 +11,16 @@ type packetEncoder interface {
	putInt16(in int16)
	putInt32(in int32)
	putInt64(in int64)
+	putVarint(in int64)
	putArrayLength(in int) error
+	putBool(in bool)

	// Collections
	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
	putRawBytes(in []byte) error
	putString(in string) error
+	putNullableString(in *string) error
	putStringArray(in []string) error
	putInt32Array(in []int32) error
	putInt64Array(in []int64) error
@@ -48,3 +52,14 @@ type pushEncoder interface {
	// of data to the saved offset, based on the data between the saved offset and curOffset.
	run(curOffset int, buf []byte) error
}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance varint encoded length
+// fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+ adjustLength(currOffset int) int +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index fd5ea0f9194..b633cd15111 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -1,6 +1,7 @@ package sarama import ( + "encoding/binary" "fmt" "math" @@ -8,6 +9,7 @@ import ( ) type prepEncoder struct { + stack []pushEncoder length int } @@ -29,6 +31,11 @@ func (pe *prepEncoder) putInt64(in int64) { pe.length += 8 } +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -37,6 +44,10 @@ func (pe *prepEncoder) putArrayLength(in int) error { return nil } +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + // arrays func (pe *prepEncoder) putBytes(in []byte) error { @@ -44,11 +55,16 @@ func (pe *prepEncoder) putBytes(in []byte) error { if in == nil { return nil } - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil } - pe.length += len(in) - return nil + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) } func (pe *prepEncoder) putRawBytes(in []byte) error { @@ -59,6 +75,14 @@ func (pe *prepEncoder) putRawBytes(in []byte) error { return nil } +func (pe *prepEncoder) putNullableString(in *string) error { + if in == nil { + pe.length += 2 + return nil + } + return pe.putString(*in) +} + func (pe *prepEncoder) putString(in string) error { pe.length += 2 if len(in) > math.MaxInt16 { @@ -108,10 +132,18 @@ func (pe *prepEncoder) offset() int { // stackable func (pe *prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) } func (pe *prepEncoder) pop() error { + in := pe.stack[len(pe.stack)-1] + pe.stack = pe.stack[:len(pe.stack)-1] + if dpe, ok := in.(dynamicPushEncoder); ok { + pe.length += dpe.adjustLength(pe.length) + } + return nil } diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go index 40dc8015148..0ec4d8d53f9 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -21,19 +21,56 @@ const ( ) type ProduceRequest struct { - RequiredAcks RequiredAcks - Timeout int32 - Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10 - msgSets map[string]map[int32]*MessageSet + TransactionalID *string + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 + records map[string]map[int32]Records +} + +func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + var topicRecordCount int64 + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? 
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Better be safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
}

func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
	pe.putInt16(int16(r.RequiredAcks))
	pe.putInt32(r.Timeout)
-	err := pe.putArrayLength(len(r.msgSets))
-	if err != nil {
-		return err
-	}
	metricRegistry := pe.metricRegistry()
	var batchSizeMetric metrics.Histogram
	var compressionRatioMetric metrics.Histogram
@@ -41,9 +78,14 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
	}
-
	totalRecordCount := int64(0)
-	for topic, partitions := range r.msgSets {
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
		err = pe.putString(topic)
		if err != nil {
			return err
@@ -57,11 +99,11 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
		if metricRegistry != nil {
			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
		}
-		for id, msgSet := range partitions {
+		for id, records := range partitions {
			startOffset := pe.offset()
			pe.putInt32(id)
			pe.push(&lengthField{})
-			err = msgSet.encode(pe)
+			err = records.encode(pe)
			if err != nil {
				return err
			}
@@ -70,23 +112,10 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
				return err
			}
			if metricRegistry != nil {
-				for _, messageBlock := range msgSet.Messages {
-					// Is this a fake "message" wrapping real messages?
- if messageBlock.Msg.Set != nil { - topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) - } else { - // A single uncompressed message - topicRecordCount++ - } - // Better be safe than sorry when computing the compression ratio - if messageBlock.Msg.compressedSize != 0 { - compressionRatio := float64(len(messageBlock.Msg.Value)) / - float64(messageBlock.Msg.compressedSize) - // Histogram do not support decimal values, let's multiple it by 100 for better precision - intCompressionRatio := int64(100 * compressionRatio) - compressionRatioMetric.Update(intCompressionRatio) - topicCompressionRatioMetric.Update(intCompressionRatio) - } + if r.Version >= 3 { + topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric) + } else { + topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric) } batchSize := int64(pe.offset() - startOffset) batchSizeMetric.Update(batchSize) @@ -108,6 +137,15 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { } func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + if version >= 3 { + id, err := pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } requiredAcks, err := pd.getInt16() if err != nil { return err @@ -123,7 +161,8 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { if topicCount == 0 { return nil } - r.msgSets = make(map[string]map[int32]*MessageSet) + + r.records = make(map[string]map[int32]Records) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { @@ -133,28 +172,29 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { if err != nil { return err } - r.msgSets[topic] = make(map[int32]*MessageSet) + r.records[topic] = make(map[int32]Records) + for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } - messageSetSize, err := pd.getInt32() + size, err := pd.getInt32() if err != nil { return err } - msgSetDecoder, err := pd.getSubset(int(messageSetSize)) + recordsDecoder, err := pd.getSubset(int(size)) if err != nil { return err } - msgSet := &MessageSet{} - err = msgSet.decode(msgSetDecoder) - if err != nil { + var records Records + if err := records.decode(recordsDecoder); err != nil { return err } - r.msgSets[topic][partition] = msgSet + r.records[topic][partition] = records } } + return nil } @@ -172,38 +212,41 @@ func (r *ProduceRequest) requiredVersion() KafkaVersion { return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_11_0_0 default: return minVersion } } -func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { - if r.msgSets == nil { - r.msgSets = make(map[string]map[int32]*MessageSet) +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) } - if r.msgSets[topic] == nil { - r.msgSets[topic] = make(map[int32]*MessageSet) + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) } +} - set := r.msgSets[topic][partition] +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].msgSet if set == nil { set = new(MessageSet) - r.msgSets[topic][partition] = set + r.records[topic][partition] = newLegacyRecords(set) } set.addMessage(msg) } func (r *ProduceRequest) AddSet(topic string, partition int32, set 
*MessageSet) { - if r.msgSets == nil { - r.msgSets = make(map[string]map[int32]*MessageSet) - } - - if r.msgSets[topic] == nil { - r.msgSets[topic] = make(map[int32]*MessageSet) - } + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} - r.msgSets[topic][partition] = set +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) } diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go index 3f05dd9fbdb..043c40f8772 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -1,6 +1,9 @@ package sarama -import "time" +import ( + "fmt" + "time" +) type ProduceResponseBlock struct { Err KError @@ -32,6 +35,23 @@ func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err erro return nil } +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + + return nil +} + type ProduceResponse struct { Blocks map[string]map[int32]*ProduceResponseBlock Version int16 @@ -103,8 +123,10 @@ func (r *ProduceResponse) encode(pe packetEncoder) error { } for id, prb := range partitions { pe.putInt32(id) - pe.putInt16(int16(prb.Err)) - pe.putInt64(prb.Offset) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } } } if r.Version >= 1 { @@ -127,6 +149,8 @@ func (r *ProduceResponse) requiredVersion() KafkaVersion { return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_11_0_0 default: return minVersion } diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 158d9c4754c..3cbaeb5f90e 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -1,11 +1,14 @@ package sarama -import "time" +import ( + "encoding/binary" + "time" +) type partitionSet struct { - msgs []*ProducerMessage - setToSend *MessageSet - bufferBytes int + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int } type produceSet struct { @@ -39,31 +42,64 @@ func (ps *produceSet) add(msg *ProducerMessage) error { } } + timestamp := msg.Timestamp + if msg.Timestamp.IsZero() { + timestamp = time.Now() + } + partitions := ps.msgs[msg.Topic] if partitions == nil { partitions = make(map[int32]*partitionSet) ps.msgs[msg.Topic] = partitions } + var size int + set := partitions[msg.Partition] if set == nil { - set = &partitionSet{setToSend: new(MessageSet)} + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + ProducerID: -1, /* No producer id */ + Codec: ps.parent.conf.Producer.Compression, + } + set = &partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } partitions[msg.Partition] = set } set.msgs = append(set.msgs, msg) - msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { - 
if msg.Timestamp.IsZero() { - msgToSend.Timestamp = time.Now() - } else { - msgToSend.Timestamp = msg.Timestamp + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp), + } + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } } - msgToSend.Version = 1 + set.recordsToSend.recordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.msgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) } - set.setToSend.addMessage(msgToSend) - size := producerMessageOverhead + len(key) + len(val) set.bufferBytes += size ps.bufferBytes += size ps.bufferCount++ @@ -79,17 +115,43 @@ func (ps *produceSet) buildRequest() *ProduceRequest { if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { req.Version = 2 } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } for topic, partitionSet := range ps.msgs { for partition, set := range partitionSet { + if req.Version >= 3 { + rb := set.recordsToSend.recordBatch + if len(rb.Records) > 0 { + rb.LastOffsetDelta = int32(len(rb.Records) - 1) + for i, record := range rb.Records { + record.OffsetDelta = int64(i) + } + } + + req.AddBatch(topic, partition, rb) + continue + } if ps.parent.conf.Producer.Compression == CompressionNone { - req.AddSet(topic, partition, set.setToSend) + req.AddSet(topic, partition, set.recordsToSend.msgSet) } else { // When compression is enabled, the entire set for each partition is compressed // and sent as the payload of a single fake "message" with the appropriate codec // set and no key. When the server sees a message with a compression codec, it // decompresses the payload and treats the result as its message set. - payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry) + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.msgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. 
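The relative-offset loop above implements KIP-31: the messages inside a compressed wrapper are numbered 0..N-1 so a 0.10+ broker can rewrite the wrapper's offset without recompressing the set. Stripped to its essentials (toy types, not Sarama's):

```go
package main

import "fmt"

type innerMessage struct{ Offset int64 }

// assignRelativeOffsets numbers the messages inside a compressed wrapper
// 0..N-1, which is all KIP-31 asks of the producer side.
func assignRelativeOffsets(msgs []*innerMessage) {
	for i, m := range msgs {
		m.Offset = int64(i)
	}
}

func main() {
	set := []*innerMessage{{}, {}, {}}
	assignRelativeOffsets(set)
	fmt.Println(set[0].Offset, set[1].Offset, set[2].Offset) // 0 1 2
}
```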
panic(err) @@ -98,11 +160,11 @@ func (ps *produceSet) buildRequest() *ProduceRequest { Codec: ps.parent.conf.Producer.Compression, Key: nil, Value: payload, - Set: set.setToSend, // Provide the underlying message set for accurate metrics + Set: set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics } if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { compMsg.Version = 1 - compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp + compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp } req.AddMessage(topic, partition, compMsg) } @@ -135,14 +197,19 @@ func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMe } func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + switch { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): return true // Would we overflow the size-limit of a compressed message-batch for this partition? case ps.parent.conf.Producer.Compression != CompressionNone && ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes: + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index 3cf93533a6a..23045e7d33a 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -7,8 +7,11 @@ import ( var errInvalidArrayLength = PacketDecodingError{"invalid array length"} var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} var errInvalidStringLength = PacketDecodingError{"invalid string length"} var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} +var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errInvalidBool = PacketDecodingError{"invalid bool"} type realDecoder struct { raw []byte @@ -58,12 +61,26 @@ func (rd *realDecoder) getInt64() (int64, error) { return tmp, nil } +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return -1, ErrInsufficientData } - tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) rd.off += 4 if tmp > rd.remaining() { rd.off = len(rd.raw) @@ -74,53 +91,66 @@ func (rd *realDecoder) getArrayLength() (int, error) { return tmp, nil } +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + // collections func (rd 
*realDecoder) getBytes() ([]byte, error) { tmp, err := rd.getInt32() - if err != nil { return nil, err } + if tmp == -1 { + return nil, nil + } - n := int(tmp) + return rd.getRawBytes(int(tmp)) +} - switch { - case n < -1: - return nil, errInvalidByteSliceLength - case n == -1: +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() + if err != nil { + return nil, err + } + if tmp == -1 { return nil, nil - case n == 0: - return make([]byte, 0), nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return nil, ErrInsufficientData } - tmpStr := rd.raw[rd.off : rd.off+n] - rd.off += n - return tmpStr, nil + return rd.getRawBytes(int(tmp)) } -func (rd *realDecoder) getString() (string, error) { - tmp, err := rd.getInt16() - +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() if err != nil { - return "", err + return 0, err } - n := int(tmp) + n := int(length) switch { case n < -1: - return "", errInvalidStringLength - case n == -1: - return "", nil - case n == 0: - return "", nil + return 0, errInvalidStringLength case n > rd.remaining(): rd.off = len(rd.raw) - return "", ErrInsufficientData + return 0, ErrInsufficientData + } + + return n, nil +} + +func (rd *realDecoder) getString() (string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return "", err } tmpStr := string(rd.raw[rd.off : rd.off+n]) @@ -128,6 +158,17 @@ func (rd *realDecoder) getString() (string, error) { return tmpStr, nil } +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return &tmpStr, err +} + func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -221,8 +262,16 @@ func (rd *realDecoder) remaining() int { } func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { if length < 0 { - return nil, errInvalidSubsetSize + return nil, errInvalidByteSliceLength } else if length > rd.remaining() { rd.off = len(rd.raw) return nil, ErrInsufficientData @@ -230,7 +279,15 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { start := rd.off rd.off += length - return &realDecoder{raw: rd.raw[start:rd.off]}, nil + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { + if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil } // stacks @@ -238,10 +295,17 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { func (rd *realDecoder) push(in pushDecoder) error { in.saveOffset(rd.off) - reserve := in.reserveLength() - if rd.remaining() < reserve { - rd.off = len(rd.raw) - return ErrInsufficientData + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } } rd.stack = append(rd.stack, in) diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index ced4267c39e..3c75387f779 100644 --- 
a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -35,11 +35,23 @@ func (re *realEncoder) putInt64(in int64) { re.off += 8 } +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil } +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + // collection func (re *realEncoder) putRawBytes(in []byte) error { @@ -54,9 +66,16 @@ func (re *realEncoder) putBytes(in []byte) error { return nil } re.putInt32(int32(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) } func (re *realEncoder) putString(in string) error { @@ -66,6 +85,14 @@ func (re *realEncoder) putString(in string) error { return nil } +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + func (re *realEncoder) putStringArray(in []string) error { err := re.putArrayLength(len(in)) if err != nil { diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 00000000000..cded308cf0f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,113 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +type Record struct { + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + Headers []*RecordHeader + + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers = make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < 
numHeaders; i++ {
+		hdr := new(RecordHeader)
+		if err := hdr.decode(pd); err != nil {
+			return err
+		}
+		r.Headers[i] = hdr
+	}
+
+	return pd.pop()
+}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
new file mode 100644
index 00000000000..321de485b0d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -0,0 +1,259 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+	for _, r := range e {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+	for i := range e {
+		rec := &Record{}
+		if err := rec.decode(pd); err != nil {
+			return err
+		}
+		e[i] = rec
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	Control               bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
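In the v2 batch header decoded above, the codec and the control flag share a single attributes word. A small sketch of that bit packing (the mask values follow the Kafka protocol and the `controlMask` constant from `record.go`; treat it as an illustration rather than the vendored API):

```go
package main

import "fmt"

const (
	compressionCodecMask = 0x07 // low three bits: codec id (Kafka protocol)
	controlMask          = 0x20 // bit five: control batch, as in record.go
)

// packAttributes folds a codec id and the control flag into the int16
// attributes word, the inverse of what RecordBatch.decode unpacks.
func packAttributes(codec int8, control bool) int16 {
	attr := int16(codec) & compressionCodecMask
	if control {
		attr |= controlMask
	}
	return attr
}

func main() {
	attr := packAttributes(2, true) // 2 is snappy in the protocol tables
	fmt.Printf("attributes=%#04x codec=%d control=%v\n",
		attr, attr&compressionCodecMask, attr&controlMask != 0)
}
```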
+ } + if numRecs >= 0 { + b.Records = make([]*Record, numRecs) + } + + bufSize := int(batchLen) - recordBatchOverhead + recBuffer, err := pd.getRawBytes(bufSize) + if err != nil { + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err + } + + if err = pd.pop(); err != nil { + return err + } + + switch b.Codec { + case CompressionNone: + case CompressionGZIP: + reader, err := gzip.NewReader(bytes.NewReader(recBuffer)) + if err != nil { + return err + } + if recBuffer, err = ioutil.ReadAll(reader); err != nil { + return err + } + case CompressionSnappy: + if recBuffer, err = snappy.Decode(recBuffer); err != nil { + return err + } + case CompressionLZ4: + reader := lz4.NewReader(bytes.NewReader(recBuffer)) + if recBuffer, err = ioutil.ReadAll(reader); err != nil { + return err + } + default: + return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} + } + + b.recordsLen = len(recBuffer) + err = decode(recBuffer, recordsArray(b.Records)) + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err +} + +func (b *RecordBatch) encodeRecords(pe packetEncoder) error { + var raw []byte + var err error + if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { + return err + } + b.recordsLen = len(raw) + + switch b.Codec { + case CompressionNone: + b.compressedRecords = raw + case CompressionGZIP: + var buf bytes.Buffer + writer := gzip.NewWriter(&buf) + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + case CompressionSnappy: + b.compressedRecords = snappy.Encode(raw) + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + + return nil +} + +func (b *RecordBatch) computeAttributes() int16 { + attr := int16(b.Codec) & int16(compressionCodecMask) + if b.Control { + attr |= controlMask + } + return attr +} + +func (b *RecordBatch) addRecord(r *Record) { + b.Records = append(b.Records, r) +} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go new file mode 100644 index 00000000000..258dcbac880 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/records.go @@ -0,0 +1,173 @@ +package sarama + +import "fmt" + +const ( + unknownRecords = iota + legacyRecords + defaultRecords + + magicOffset = 16 + magicLength = 1 +) + +// Records implements a union type containing either a RecordBatch or a legacy MessageSet. +type Records struct { + recordsType int + msgSet *MessageSet + recordBatch *RecordBatch +} + +func newLegacyRecords(msgSet *MessageSet) Records { + return Records{recordsType: legacyRecords, msgSet: msgSet} +} + +func newDefaultRecords(batch *RecordBatch) Records { + return Records{recordsType: defaultRecords, recordBatch: batch} +} + +// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil. +// The first return value indicates whether both fields are nil (and the type is not set). +// If both fields are not nil, it returns an error. 
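The `Records` union introduced in `records.go` above resolves its type tag lazily from whichever payload is populated, as `setTypeFromFields` (continuing below) does. The pattern in miniature, with toy stand-in types rather than the vendored ones:

```go
package main

import (
	"errors"
	"fmt"
)

const (
	unknownRecords = iota
	legacyRecords
	defaultRecords
)

// miniRecords is a toy version of sarama's Records union: two optional
// payloads plus a lazily resolved type tag.
type miniRecords struct {
	recordsType int
	msgSet      *string // stands in for *MessageSet
	recordBatch *string // stands in for *RecordBatch
}

func (r *miniRecords) resolveType() error {
	if r.msgSet != nil && r.recordBatch != nil {
		return errors.New("both msgSet and recordBatch are set")
	}
	switch {
	case r.msgSet != nil:
		r.recordsType = legacyRecords
	case r.recordBatch != nil:
		r.recordsType = defaultRecords
	}
	return nil
}

func main() {
	legacy := "v0/v1 message set"
	r := &miniRecords{msgSet: &legacy}
	if err := r.resolveType(); err != nil {
		panic(err)
	}
	fmt.Println(r.recordsType == legacyRecords) // true
}
```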
+func (r *Records) setTypeFromFields() (bool, error) { + if r.msgSet == nil && r.recordBatch == nil { + return true, nil + } + if r.msgSet != nil && r.recordBatch != nil { + return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown") + } + r.recordsType = defaultRecords + if r.msgSet != nil { + r.recordsType = legacyRecords + } + return false, nil +} + +func (r *Records) encode(pe packetEncoder) error { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return err + } + } + + switch r.recordsType { + case legacyRecords: + if r.msgSet == nil { + return nil + } + return r.msgSet.encode(pe) + case defaultRecords: + if r.recordBatch == nil { + return nil + } + return r.recordBatch.encode(pe) + } + + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) setTypeFromMagic(pd packetDecoder) error { + magic, err := magicValue(pd) + if err != nil { + return err + } + + r.recordsType = defaultRecords + if magic < 2 { + r.recordsType = legacyRecords + } + + return nil +} + +func (r *Records) decode(pd packetDecoder) error { + if r.recordsType == unknownRecords { + if err := r.setTypeFromMagic(pd); err != nil { + return err + } + } + + switch r.recordsType { + case legacyRecords: + r.msgSet = &MessageSet{} + return r.msgSet.decode(pd) + case defaultRecords: + r.recordBatch = &RecordBatch{} + return r.recordBatch.decode(pd) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) numRecords() (int, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return 0, err + } + } + + switch r.recordsType { + case legacyRecords: + if r.msgSet == nil { + return 0, nil + } + return len(r.msgSet.Messages), nil + case defaultRecords: + if r.recordBatch == nil { + return 0, nil + } + return len(r.recordBatch.Records), nil + } + return 0, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isPartial() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.msgSet == nil { + return false, nil + } + return r.msgSet.PartialTrailingMessage, nil + case defaultRecords: + if r.recordBatch == nil { + return false, nil + } + return r.recordBatch.PartialTrailingRecord, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isControl() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case legacyRecords: + return false, nil + case defaultRecords: + if r.recordBatch == nil { + return false, nil + } + return r.recordBatch.Control, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func magicValue(pd packetDecoder) (int8, error) { + dec, err := pd.peek(magicOffset, magicLength) + if err != nil { + return 0, err + } + + return dec.getInt8() +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index 73310ca8705..5f7cb76e95b 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -114,6 +114,32 @@ func allocateBody(key, version int16) protocolBody { return &SaslHandshakeRequest{} case 18: return 
&ApiVersionsRequest{}
+	case 19:
+		return &CreateTopicsRequest{}
+	case 20:
+		return &DeleteTopicsRequest{}
+	case 22:
+		return &InitProducerIDRequest{}
+	case 24:
+		return &AddPartitionsToTxnRequest{}
+	case 25:
+		return &AddOffsetsToTxnRequest{}
+	case 26:
+		return &EndTxnRequest{}
+	case 28:
+		return &TxnOffsetCommitRequest{}
+	case 29:
+		return &DescribeAclsRequest{}
+	case 30:
+		return &CreateAclsRequest{}
+	case 31:
+		return &DeleteAclsRequest{}
+	case 32:
+		return &DescribeConfigsRequest{}
+	case 33:
+		return &AlterConfigsRequest{}
+	case 37:
+		return &CreatePartitionsRequest{}
 	}
 	return nil
 }
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 00000000000..372278d0bfa
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type Timestamp struct {
+	*time.Time
+}
+
+func (t Timestamp) encode(pe packetEncoder) error {
+	timestamp := int64(-1)
+
+	if !t.Before(time.Unix(0, 0)) {
+		timestamp = t.UnixNano() / int64(time.Millisecond)
+	} else if !t.IsZero() {
+		return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+	}
+
+	pe.putInt64(timestamp)
+	return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+	millis, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// negative timestamps are invalid, in these cases we should return
+	// a zero time
+	timestamp := time.Time{}
+	if millis >= 0 {
+		timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+	}
+
+	*t.Time = timestamp
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/tools/README.md b/vendor/github.com/Shopify/sarama/tools/README.md
new file mode 100644
index 00000000000..3464c4ad804
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/README.md
@@ -0,0 +1,10 @@
+# Sarama tools
+
+This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation.
+Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function.
+
+- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
+- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
+- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
+
+To install all tools, run `go get github.com/Shopify/sarama/tools/...`
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
new file mode 100644
index 00000000000..4e77f0b7050
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
@@ -0,0 +1,29 @@
+# kafka-console-consumer
+
+A simple command line tool to consume partitions of a topic and print the
+messages on the standard output.
+
+### Installation
+
+    go get github.com/Shopify/sarama/tools/kafka-console-consumer
+
+### Usage
+
+    # Minimum invocation
+    kafka-console-consumer -topic=test -brokers=kafka1:9092
+
+    # It will pick up a KAFKA_PEERS environment variable
+    export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+    kafka-console-consumer -topic=test
+
+    # You can specify the offset you want to start at. It can be either
+    # `oldest`, `newest`. The default is `newest`.
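The consumer tool (whose source follows) maps those two symbolic names onto Sarama's sentinel offsets. A compact sketch of that mapping, assuming the vendored package is importable:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// pickOffset mirrors the tool's flag handling: `oldest` and `newest` map
// onto sarama's sentinel values; anything else is rejected.
func pickOffset(s string) (int64, error) {
	switch s {
	case "oldest":
		return sarama.OffsetOldest, nil
	case "newest":
		return sarama.OffsetNewest, nil
	}
	return 0, fmt.Errorf("offset should be `oldest` or `newest`, got %q", s)
}

func main() {
	off, err := pickOffset("oldest")
	fmt.Println(off, err) // -2 <nil>: OffsetOldest is the -2 sentinel
}
```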
+ kafka-console-consumer -topic=test -offset=oldest + kafka-console-consumer -topic=test -offset=newest + + # You can specify the partition(s) you want to consume as a comma-separated + # list. The default is `all`. + kafka-console-consumer -topic=test -partitions=1,2,3 + + # Display all command line options + kafka-console-consumer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go new file mode 100644 index 00000000000..0f1eb89a906 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go @@ -0,0 +1,145 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/signal" + "strconv" + "strings" + "sync" + + "github.com/Shopify/sarama" +) + +var ( + brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") + topic = flag.String("topic", "", "REQUIRED: the topic to consume") + partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") + offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`") + verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") + bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +func main() { + flag.Parse() + + if *brokerList == "" { + printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") + } + + if *topic == "" { + printUsageErrorAndExit("-topic is required") + } + + if *verbose { + sarama.Logger = logger + } + + var initialOffset int64 + switch *offset { + case "oldest": + initialOffset = sarama.OffsetOldest + case "newest": + initialOffset = sarama.OffsetNewest + default: + printUsageErrorAndExit("-offset should be `oldest` or `newest`") + } + + c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) + if err != nil { + printErrorAndExit(69, "Failed to start consumer: %s", err) + } + + partitionList, err := getPartitions(c) + if err != nil { + printErrorAndExit(69, "Failed to get the list of partitions: %s", err) + } + + var ( + messages = make(chan *sarama.ConsumerMessage, *bufferSize) + closing = make(chan struct{}) + wg sync.WaitGroup + ) + + go func() { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Kill, os.Interrupt) + <-signals + logger.Println("Initiating shutdown of consumer...") + close(closing) + }() + + for _, partition := range partitionList { + pc, err := c.ConsumePartition(*topic, partition, initialOffset) + if err != nil { + printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) + } + + go func(pc sarama.PartitionConsumer) { + <-closing + pc.AsyncClose() + }(pc) + + wg.Add(1) + go func(pc sarama.PartitionConsumer) { + defer wg.Done() + for message := range pc.Messages() { + messages <- message + } + }(pc) + } + + go func() { + for msg := range messages { + fmt.Printf("Partition:\t%d\n", msg.Partition) + fmt.Printf("Offset:\t%d\n", msg.Offset) + fmt.Printf("Key:\t%s\n", string(msg.Key)) + fmt.Printf("Value:\t%s\n", string(msg.Value)) + fmt.Println() + } + }() + + wg.Wait() + logger.Println("Done consuming topic", *topic) + close(messages) + + if err := c.Close(); err != nil { + logger.Println("Failed to close consumer: ", err) + } +} + +func 
getPartitions(c sarama.Consumer) ([]int32, error) { + if *partitions == "all" { + return c.Partitions(*topic) + } + + tmp := strings.Split(*partitions, ",") + var pList []int32 + for i := range tmp { + val, err := strconv.ParseInt(tmp[i], 10, 32) + if err != nil { + return nil, err + } + pList = append(pList, int32(val)) + } + + return pList, nil +} + +func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} + +func printUsageErrorAndExit(format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md new file mode 100644 index 00000000000..646dd5f5c29 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md @@ -0,0 +1,28 @@ +# kafka-console-partitionconsumer + +NOTE: this tool is deprecated in favour of the more general and more powerful +`kafka-console-consumer`. + +A simple command line tool to consume a partition of a topic and print the messages +on the standard output. + +### Installation + + go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer + +### Usage + + # Minimum invocation + kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092 + + # It will pick up a KAFKA_PEERS environment variable + export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 + kafka-console-partitionconsumer -topic=test -partition=4 + + # You can specify the offset you want to start at. It can be either + # `oldest`, `newest`, or a specific offset number + kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest + kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337 + + # Display all command line options + kafka-console-partitionconsumer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go new file mode 100644 index 00000000000..d5e4464de18 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go @@ -0,0 +1,102 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/signal" + "strconv" + "strings" + + "github.com/Shopify/sarama" +) + +var ( + brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") + topic = flag.String("topic", "", "REQUIRED: the topic to consume") + partition = flag.Int("partition", -1, "REQUIRED: the partition to consume") + offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`, or an actual offset") + verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +func main() { + flag.Parse() + + if *brokerList == "" { + printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") + } + + if *topic == "" { + printUsageErrorAndExit("-topic is required") + } + + if *partition == -1 { + printUsageErrorAndExit("-partition is required") + } + + if *verbose { + sarama.Logger = logger + } + + var ( + initialOffset int64 + offsetError error + ) + switch *offset { + case "oldest": + initialOffset = sarama.OffsetOldest + case "newest": + initialOffset = sarama.OffsetNewest + default: + initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64) + } + + if offsetError != nil { + printUsageErrorAndExit("Invalid initial offset: %s", *offset) + } + + c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) + if err != nil { + printErrorAndExit(69, "Failed to start consumer: %s", err) + } + + pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset) + if err != nil { + printErrorAndExit(69, "Failed to start partition consumer: %s", err) + } + + go func() { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Kill, os.Interrupt) + <-signals + pc.AsyncClose() + }() + + for msg := range pc.Messages() { + fmt.Printf("Offset:\t%d\n", msg.Offset) + fmt.Printf("Key:\t%s\n", string(msg.Key)) + fmt.Printf("Value:\t%s\n", string(msg.Value)) + fmt.Println() + } + + if err := c.Close(); err != nil { + logger.Println("Failed to close consumer: ", err) + } +} + +func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} + +func printUsageErrorAndExit(format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md new file mode 100644 index 00000000000..6b3a65f211a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md @@ -0,0 +1,34 @@ +# kafka-console-producer + +A simple command line tool to produce a single message to Kafka. + +### Installation + + go get github.com/Shopify/sarama/tools/kafka-console-producer + + +### Usage + + # Minimum invocation + kafka-console-producer -topic=test -value=value -brokers=kafka1:9092 + + # It will pick up a KAFKA_PEERS environment variable + export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 + kafka-console-producer -topic=test -value=value + + # It will read the value from stdin by using pipes + echo "hello world" | kafka-console-producer -topic=test + + # Specify a key: + echo "hello world" | kafka-console-producer -topic=test -key=key + + # Partitioning: by default, kafka-console-producer will partition as follows: + # - manual partitioning if a -partition is provided + # - hash partitioning by key if a -key is provided + # - random partioning otherwise. 
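That default scheme corresponds to the following `sarama.Config` wiring, using the partitioner constructors from the tool's source below (the flag values here are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Hypothetical flag values, for illustration only.
	partition := int32(-1)
	key := "order-42"

	config := sarama.NewConfig()
	switch {
	case partition >= 0: // -partition given: manual
		config.Producer.Partitioner = sarama.NewManualPartitioner
	case key != "": // -key given: hash
		config.Producer.Partitioner = sarama.NewHashPartitioner
	default: // otherwise: random
		config.Producer.Partitioner = sarama.NewRandomPartitioner
	}
	fmt.Println("partitioner selected")
}
```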
+ # + # You can override this using the -partitioner argument: + echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random + + # Display all command line options + kafka-console-producer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go new file mode 100644 index 00000000000..83054ed78a1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go @@ -0,0 +1,124 @@ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + + "github.com/Shopify/sarama" + "github.com/rcrowley/go-metrics" +) + +var ( + brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable") + topic = flag.String("topic", "", "REQUIRED: the topic to produce to") + key = flag.String("key", "", "The key of the message to produce. Can be empty.") + value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.") + partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`") + partition = flag.Int("partition", -1, "The partition to produce to.") + verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr") + showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr") + silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout") + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +func main() { + flag.Parse() + + if *brokerList == "" { + printUsageErrorAndExit("no -brokers specified. 
Alternatively, set the KAFKA_PEERS environment variable") + } + + if *topic == "" { + printUsageErrorAndExit("no -topic specified") + } + + if *verbose { + sarama.Logger = logger + } + + config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll + config.Producer.Return.Successes = true + + switch *partitioner { + case "": + if *partition >= 0 { + config.Producer.Partitioner = sarama.NewManualPartitioner + } else { + config.Producer.Partitioner = sarama.NewHashPartitioner + } + case "hash": + config.Producer.Partitioner = sarama.NewHashPartitioner + case "random": + config.Producer.Partitioner = sarama.NewRandomPartitioner + case "manual": + config.Producer.Partitioner = sarama.NewManualPartitioner + if *partition == -1 { + printUsageErrorAndExit("-partition is required when partitioning manually") + } + default: + printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner)) + } + + message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)} + + if *key != "" { + message.Key = sarama.StringEncoder(*key) + } + + if *value != "" { + message.Value = sarama.StringEncoder(*value) + } else if stdinAvailable() { + bytes, err := ioutil.ReadAll(os.Stdin) + if err != nil { + printErrorAndExit(66, "Failed to read data from the standard input: %s", err) + } + message.Value = sarama.ByteEncoder(bytes) + } else { + printUsageErrorAndExit("-value is required, or you have to provide the value on stdin") + } + + producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config) + if err != nil { + printErrorAndExit(69, "Failed to open Kafka producer: %s", err) + } + defer func() { + if err := producer.Close(); err != nil { + logger.Println("Failed to close Kafka producer cleanly:", err) + } + }() + + partition, offset, err := producer.SendMessage(message) + if err != nil { + printErrorAndExit(69, "Failed to produce message: %s", err) + } else if !*silent { + fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset) + } + if *showMetrics { + metrics.WriteOnce(config.MetricRegistry, os.Stderr) + } +} + +func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} + +func printUsageErrorAndExit(message string) { + fmt.Fprintln(os.Stderr, "ERROR:", message) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} + +func stdinAvailable() bool { + stat, _ := os.Stdin.Stat() + return (stat.Mode() & os.ModeCharDevice) == 0 +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go new file mode 100644 index 00000000000..71e95b814cb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -0,0 +1,126 @@ +package sarama + +type TxnOffsetCommitRequest struct { + TransactionalID string + GroupID string + ProducerID int64 + ProducerEpoch int16 + Topics map[string][]*PartitionOffsetMetadata +} + +func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { + if err := pe.putString(t.TransactionalID); err != nil { + return err + } + if err := pe.putString(t.GroupID); err != nil { + return err + } + pe.putInt64(t.ProducerID) + pe.putInt16(t.ProducerEpoch) + + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + for topic, partitions := range t.Topics { + if err := pe.putString(topic); err != nil 
{ + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for _, partition := range partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + if t.TransactionalID, err = pd.getString(); err != nil { + return err + } + if t.GroupID, err = pd.getString(); err != nil { + return err + } + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if t.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionOffsetMetadata) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionOffsetMetadata, m) + + for j := 0; j < m; j++ { + partitionOffsetMetadata := new(PartitionOffsetMetadata) + if err := partitionOffsetMetadata.decode(pd, version); err != nil { + return err + } + t.Topics[topic][j] = partitionOffsetMetadata + } + } + + return nil +} + +func (a *TxnOffsetCommitRequest) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitRequest) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionOffsetMetadata struct { + Partition int32 + Offset int64 + Metadata *string +} + +func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt64(p.Offset) + if err := pe.putNullableString(p.Metadata); err != nil { + return err + } + + return nil +} + +func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + if p.Offset, err = pd.getInt64(); err != nil { + return err + } + if p.Metadata, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go new file mode 100644 index 00000000000..6c980f4066f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -0,0 +1,83 @@ +package sarama + +import ( + "time" +) + +type TxnOffsetCommitResponse struct { + ThrottleTime time.Duration + Topics map[string][]*PartitionError +} + +func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + + for topic, e := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = 
make([]*PartitionError, m) + + for j := 0; j < m; j++ { + t.Topics[topic][j] = new(PartitionError) + if err := t.Topics[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *TxnOffsetCommitResponse) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitResponse) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index d36db921089..9d7b60f1614 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -2,8 +2,9 @@ package sarama import ( "bufio" + "fmt" "net" - "sort" + "regexp" ) type none struct{} @@ -23,13 +24,11 @@ func (slice int32Slice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } -func dupeAndSort(input []int32) []int32 { +func dupInt32Slice(input []int32) []int32 { ret := make([]int32, 0, len(input)) for _, val := range input { ret = append(ret, val) } - - sort.Sort(int32Slice(ret)) return ret } @@ -149,5 +148,37 @@ var ( V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) + V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) + V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) minVersion = V0_8_2_0 ) + +func ParseKafkaVersion(s string) (KafkaVersion, error) { + var major, minor, veryMinor, patch uint + var err error + if s[0] == '0' { + err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) + } else { + err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) + } + if err != nil { + return minVersion, err + } + return newKafkaVersion(major, minor, veryMinor, patch), nil +} + +func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { + if !regexp.MustCompile(pattern).MatchString(s) { + return fmt.Errorf("invalid version `%s`", s) + } + _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) + return err +} + +func (v KafkaVersion) String() string { + if v.version[0] == 0 { + return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3]) + } else { + return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) + } +} diff --git a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh new file mode 100755 index 00000000000..95e47dde434 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +set -ex + +# Launch and wait for toxiproxy +${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh & +while ! 
nc -q 1 localhost 2181 ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid +done diff --git a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf new file mode 100644 index 00000000000..25101df5a32 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf @@ -0,0 +1,9 @@ +start on started zookeeper-ZK_PORT +stop on stopping zookeeper-ZK_PORT + +# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper) +script + sleep 2 + export KAFKA_HEAP_OPTS="-Xmx320m" + exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties +end script diff --git a/vendor/github.com/Shopify/sarama/vagrant/provision.sh b/vendor/github.com/Shopify/sarama/vagrant/provision.sh new file mode 100755 index 00000000000..13a8d56238e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/vagrant/provision.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -ex + +apt-get update +yes | apt-get install default-jre + +export KAFKA_INSTALL_ROOT=/opt +export KAFKA_HOSTNAME=192.168.100.67 +export KAFKA_VERSION=1.0.0 +export REPOSITORY_ROOT=/vagrant + +sh /vagrant/vagrant/install_cluster.sh +sh /vagrant/vagrant/setup_services.sh +sh /vagrant/vagrant/create_topics.sh diff --git a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh new file mode 100755 index 00000000000..e52c00e7b5a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +set -ex + +${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 & +PID=$! + +while ! nc -q 1 localhost 8474 + +# The number of threads handling network requests +num.network.threads=2 + +# The number of threads doing disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=1048576 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=1048576 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma seperated list of directories under which to store log files +log.dirs=KAFKA_DATADIR + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=2 + +# Create new topics with a replication factor of 2 so failover can be tested +# more easily. +default.replication.factor=2 + +auto.create.topics.enable=false +delete.topic.enable=true + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
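Returning to the `ParseKafkaVersion` helper added in `utils.go` above: it accepts both the four-part `0.x.y.z` scheme and the three-part scheme used from 1.0.0 on. A usage sketch, assuming the vendored package is importable:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	for _, s := range []string{"0.11.0.0", "1.0.0"} {
		v, err := sarama.ParseKafkaVersion(s)
		if err != nil {
			panic(err)
		}
		// String() round-trips: four-part output for 0.x, three-part after.
		fmt.Printf("%s -> %v (>= 0.11? %v)\n", s, v, v.IsAtLeast(sarama.V0_11_0_0))
	}
}
```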
+ +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining +# segments don't drop below log.retention.bytes. +log.retention.bytes=268435456 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=268435456 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=60000 + +# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires. +# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction. +log.cleaner.enable=false + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma separated host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zookeeper.connect=localhost:ZK_PORT + +# Timeout in ms for connecting to zookeeper +zookeeper.session.timeout.ms=3000 +zookeeper.connection.timeout.ms=3000 diff --git a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh new file mode 100755 index 00000000000..81d8ea05d3b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +set -ex + +stop toxiproxy || true +cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf +cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/ +start toxiproxy + +for i in 1 2 3 4 5; do + ZK_PORT=`expr $i + 2180` + KAFKA_PORT=`expr $i + 9090` + + stop zookeeper-${ZK_PORT} || true + + # set up zk service + cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf + sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf + + # set up kafka service + cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf + sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf + sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf + + start zookeeper-${ZK_PORT} +done + +# Wait for the last kafka node to finish booting +while ! nc -q 1 localhost 29095
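For orientation, the port arithmetic used throughout these vagrant scripts: `setup_services.sh` derives `ZK_PORT = 2180 + i` and `KAFKA_PORT = 9090 + i` for the five nodes, and the final wait on port 29095 suggests the brokers are additionally exposed through toxiproxy at a 20000 offset (an assumption, since `run_toxiproxy.sh` is not shown in full). A tiny sketch of that layout:

```go
package main

import "fmt"

// The five-node vagrant layout implied by setup_services.sh: zookeeper on
// 2180+i, kafka on 9090+i, and (assumed) toxiproxy mirrors of the brokers
// at 20000+KAFKA_PORT.
func main() {
	for i := 1; i <= 5; i++ {
		fmt.Printf("node %d: zookeeper=%d kafka=%d proxied=%d\n",
			i, 2180+i, 9090+i, 20000+9090+i)
	}
}
```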