Skip to content

Commit

Permalink
Fixes Kafka config prefix
Browse files Browse the repository at this point in the history
  • Loading branch information
cyriltovena committed Sep 5, 2024
1 parent 4723d78 commit ddacb06
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 11 deletions.
20 changes: 10 additions & 10 deletions docs/sources/shared/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -1029,53 +1029,53 @@ metastore_client:

kafka_config:
# The Kafka backend address.
# CLI flag: -.address
# CLI flag: -kafka.address
[address: <string> | default = "localhost:9092"]

# The Kafka topic name.
# CLI flag: -.topic
# CLI flag: -kafka.topic
[topic: <string> | default = ""]

# The Kafka client ID.
# CLI flag: -.client-id
# CLI flag: -kafka.client-id
[client_id: <string> | default = ""]

# The maximum time allowed to open a connection to a Kafka broker.
# CLI flag: -.dial-timeout
# CLI flag: -kafka.dial-timeout
[dial_timeout: <duration> | default = 2s]

# How long to wait for an incoming write request to be successfully committed
# to the Kafka backend.
# CLI flag: -.write-timeout
# CLI flag: -kafka.write-timeout
[write_timeout: <duration> | default = 10s]

# The consumer group used by the consumer to track the last consumed offset.
# The consumer group must be different for each ingester. If the configured
# consumer group contains the '<partition>' placeholder, it is replaced with
# the actual partition ID owned by the ingester. When empty (recommended),
# Mimir uses the ingester instance ID to guarantee uniqueness.
# CLI flag: -.consumer-group
# CLI flag: -kafka.consumer-group
[consumer_group: <string> | default = ""]

# How long to retry a failed request to get the last produced offset.
# CLI flag: -.last-produced-offset-retry-timeout
# CLI flag: -kafka.last-produced-offset-retry-timeout
[last_produced_offset_retry_timeout: <duration> | default = 10s]

# Enable auto-creation of Kafka topic if it doesn't exist.
# CLI flag: -.auto-create-topic-enabled
# CLI flag: -kafka.auto-create-topic-enabled
[auto_create_topic_enabled: <boolean> | default = true]

# The maximum size of a Kafka record data that should be generated by the
# producer. An incoming write request larger than this size is split into
# multiple Kafka records. We strongly recommend to not change this setting
# unless for testing purposes.
# CLI flag: -.producer-max-record-size-bytes
# CLI flag: -kafka.producer-max-record-size-bytes
[producer_max_record_size_bytes: <int> | default = 15983616]

# The maximum size of (uncompressed) buffered and unacknowledged produced
# records sent to Kafka. The produce request fails once this limit is reached.
# This limit is per Kafka client. 0 to disable the limit.
# CLI flag: -.producer-max-buffered-bytes
# CLI flag: -kafka.producer-max-buffered-bytes
[producer_max_buffered_bytes: <int> | default = 1073741824]

kafka_ingester:
Expand Down
2 changes: 1 addition & 1 deletion pkg/kafka/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ type Config struct {
}

func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix("", f)
cfg.RegisterFlagsWithPrefix("kafka", f)
}

func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
Expand Down

0 comments on commit ddacb06

Please sign in to comment.