[cluster] Fix etcd config default value handling (m3db#4130)
vdarulis authored and safa-topal committed Jan 12, 2023
1 parent fb30a99 commit cbcceac
Showing 6 changed files with 59 additions and 59 deletions.
53 changes: 17 additions & 36 deletions scripts/docker-integration-tests/aggregator/m3aggregator.yml
@@ -17,15 +17,11 @@ m3msg:
server:
listenAddress: 0.0.0.0:6000
retry:
maxBackoff: 10s
maxBackoff: 1s
jitter: true
consumer:
messagePool:
size: 16384
watermark:
low: 0.2
high: 0.5

size: dynamic
http:
listenAddress: 0.0.0.0:6001
readTimeout: 60s
@@ -39,6 +35,8 @@ kvClient:
cacheDir: /var/lib/m3kv
etcdClusters:
- zone: embedded
autoSyncInterval: 10m
dialTimeout: 1m
endpoints:
- dbnode01:2379

@@ -68,7 +66,7 @@ aggregator:
timerTransformFnType: suffix
gaugeTransformFnType: empty
aggregationTypesPool:
size: 1024
size: 32
quantilesPool:
buckets:
- count: 256
@@ -105,18 +103,15 @@ aggregator:
namespaces:
placement: /placement
messagePool:
size: 16384
watermark:
low: 0.2
high: 0.5
size: dynamic
placementManager:
kvConfig:
namespace: /placement
environment: override_test_env
zone: embedded
placementWatcher:
key: m3aggregator
initWatchTimeout: 10s
initWatchTimeout: 1s
hashType: murmur32
bufferDurationBeforeShardCutover: 10m
bufferDurationAfterShardCutoff: 10m
@@ -130,8 +125,8 @@ aggregator:
flushTimesKeyFmt: shardset/%d/flush
flushTimesPersistRetrier:
initialBackoff: 100ms
backoffFactor: 2.0
maxBackoff: 2s
backoffFactor: 1.0
maxBackoff: 500ms
maxRetries: 3
electionManager:
election:
@@ -146,38 +141,27 @@ aggregator:
campaignRetrier:
initialBackoff: 100ms
backoffFactor: 2.0
maxBackoff: 2s
maxBackoff: 1s
forever: true
jitter: true
changeRetrier:
initialBackoff: 100ms
backoffFactor: 2.0
maxBackoff: 5s
maxBackoff: 1s
forever: true
jitter: true
resignRetrier:
initialBackoff: 100ms
backoffFactor: 2.0
maxBackoff: 5s
maxBackoff: 1s
forever: true
jitter: true
campaignStateCheckInterval: 1s
shardCutoffCheckOffset: 30s
flushManager:
checkEvery: 1s
jitterEnabled: true
maxJitters:
- flushInterval: 5s
maxJitterPercent: 1.0
- flushInterval: 10s
maxJitterPercent: 0.5
- flushInterval: 1m
maxJitterPercent: 0.5
- flushInterval: 10m
maxJitterPercent: 0.5
- flushInterval: 1h
maxJitterPercent: 0.25
numWorkersPerCPU: 0.5
checkEvery: 500ms
jitterEnabled: false
numWorkersPerCPU: 0.1
maxBufferSize: 5m
forcedFlushWindowSize: 10s
flush:
@@ -192,14 +176,11 @@ aggregator:
zone: embedded
environment: override_test_env
messagePool:
size: 16384
watermark:
low: 0.2
high: 0.5
size: dynamic
passthrough:
enabled: true
forwarding:
maxConstDelay: 1m # Need to add some buffer window, since timed metrics by default are delayed by 1min.
maxConstDelay: 65s # Need to add some buffer window, since timed metrics by default are delayed by 1min.
entryTTL: 1h
entryCheckInterval: 10m
maxTimerBatchSizePerWrite: 140
15 changes: 6 additions & 9 deletions scripts/docker-integration-tests/aggregator/m3coordinator.yml
@@ -21,6 +21,8 @@ clusters:
cacheDir: /var/lib/m3kv
etcdClusters:
- zone: embedded
autoSyncInterval: 10m
dialTimeout: 1m
endpoints:
- dbnode01:2379

@@ -55,18 +57,13 @@ downsample:
namespaces:
placement: /placement
connection:
numConnections: 4
messagePool:
size: 16384
watermark:
low: 0.2
high: 0.5
numConnections: 1

ingest:
ingester:
workerPoolSize: 10000
workerPoolSize: 50
opPool:
size: 10000
size: dynamic
retry:
maxRetries: 3
jitter: true
@@ -75,7 +72,7 @@ ingest:
server:
listenAddress: "0.0.0.0:7507"
retry:
maxBackoff: 10s
maxBackoff: 1s
jitter: true

storeMetricsType: true
11 changes: 6 additions & 5 deletions src/cluster/client/etcd/client_test.go
@@ -387,7 +387,8 @@ func Test_newConfigFromCluster(t *testing.T) {
Timeout: 7 * time.Second,
},
TLS: nil, // TODO: TLS config gets read eagerly here; test it separately.
AutoSyncInterval: 20 * time.Second,
AutoSyncInterval: 21 * time.Second,
DialTimeout: 42 * time.Second,
}
}

@@ -398,10 +399,10 @@
assert.Equal(t,
clientv3.Config{
Endpoints: []string{"i1"},
AutoSyncInterval: 20000000000,
DialTimeout: 15000000000,
DialKeepAliveTime: 5000000010, // generated using fake rnd above
DialKeepAliveTimeout: 7000000000,
AutoSyncInterval: 21 * time.Second,
DialTimeout: 42 * time.Second,
DialKeepAliveTime: 5*time.Second + 10, // generated using fake rnd above
DialKeepAliveTimeout: 7 * time.Second,
MaxCallSendMsgSize: 33554432,
MaxCallRecvMsgSize: 33554432,
RejectOldCluster: false,
17 changes: 14 additions & 3 deletions src/cluster/client/etcd/config.go
@@ -40,6 +40,7 @@ type ClusterConfig struct {
KeepAlive *KeepAliveConfig `yaml:"keepAlive"`
TLS *TLSConfig `yaml:"tls"`
AutoSyncInterval time.Duration `yaml:"autoSyncInterval"`
DialTimeout time.Duration `yaml:"dialTimeout"`

DialOptions []grpc.DialOption `yaml:"-"` // nonserializable
}
@@ -50,13 +51,23 @@ func (c ClusterConfig) NewCluster() Cluster {
if c.KeepAlive != nil {
keepAliveOpts = c.KeepAlive.NewOptions()
}
return NewCluster().

cluster := NewCluster().
SetZone(c.Zone).
SetEndpoints(c.Endpoints).
SetDialOptions(c.DialOptions).
SetKeepAliveOptions(keepAliveOpts).
SetTLSOptions(c.TLS.newOptions()).
SetAutoSyncInterval(c.AutoSyncInterval)
SetTLSOptions(c.TLS.newOptions())

if c.AutoSyncInterval > 0 {
cluster = cluster.SetAutoSyncInterval(c.AutoSyncInterval)
}

if c.DialTimeout > 0 {
cluster = cluster.SetDialTimeout(c.DialTimeout)
}

return cluster
}

// TLSConfig is the config for TLS.
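The guard clauses added above are the heart of the fix: a YAML field such as autoSyncInterval or dialTimeout that is omitted from the config unmarshals to a zero time.Duration, and the old code passed that zero straight into SetAutoSyncInterval, overriding the cluster's default (the dial timeout was not configurable at all). A rough sketch of the resulting behaviour, not part of the commit, assuming the import path github.com/m3db/m3/src/cluster/client/etcd and gopkg.in/yaml.v2 for unmarshalling:

package main

import (
	"fmt"

	etcd "github.com/m3db/m3/src/cluster/client/etcd"
	"gopkg.in/yaml.v2"
)

func main() {
	// autoSyncInterval and dialTimeout are omitted, so both fields come back
	// as zero time.Durations after unmarshalling.
	raw := []byte("zone: embedded\nendpoints:\n  - dbnode01:2379\n")

	var cfg etcd.ClusterConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// With the new guards, NewCluster leaves the package defaults in place
	// instead of configuring a zero auto-sync interval and dial timeout.
	cluster := cfg.NewCluster()
	fmt.Println(cluster.AutoSyncInterval(), cluster.DialTimeout())
}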
21 changes: 15 additions & 6 deletions src/cluster/client/etcd/config_test.go
@@ -64,7 +64,8 @@ etcdClusters:
period: 10s
jitter: 5s
timeout: 1s
autoSyncInterval: 60s
autoSyncInterval: 160s
dialTimeout: 42s
- zone: z2
endpoints:
- etcd3:2379
@@ -93,7 +94,7 @@ m3sd:
require.Equal(t, "/tmp/cache.json", cfg.CacheDir)
require.Equal(t, int64(1), cfg.WatchWithRevision)
require.Equal(t, []ClusterConfig{
ClusterConfig{
{
Zone: "z1",
Endpoints: []string{"etcd1:2379", "etcd2:2379"},
KeepAlive: &KeepAliveConfig{
@@ -102,17 +103,18 @@
Jitter: 5 * time.Second,
Timeout: time.Second,
},
AutoSyncInterval: time.Second * 60,
AutoSyncInterval: 160 * time.Second,
DialTimeout: 42 * time.Second,
},
ClusterConfig{
{
Zone: "z2",
Endpoints: []string{"etcd3:2379", "etcd4:2379"},
TLS: &TLSConfig{
CrtPath: "foo.crt.pem",
KeyPath: "foo.key.pem",
},
},
ClusterConfig{
{
Zone: "z3",
Endpoints: []string{"etcd5:2379", "etcd6:2379"},
TLS: &TLSConfig{
@@ -132,7 +134,8 @@ m3sd:
require.Equal(t, 10*time.Second, keepAliveOpts.KeepAlivePeriod())
require.Equal(t, 5*time.Second, keepAliveOpts.KeepAlivePeriodMaxJitter())
require.Equal(t, time.Second, keepAliveOpts.KeepAliveTimeout())
require.Equal(t, 60*time.Second, cluster1.AutoSyncInterval())
require.Equal(t, 160*time.Second, cluster1.AutoSyncInterval())
require.Equal(t, 42*time.Second, cluster1.DialTimeout())

cluster2, exists := opts.ClusterForZone("z2")
require.True(t, exists)
@@ -172,3 +175,9 @@ m3sd:
require.Equal(t, os.FileMode(0744), *cfg2.NewDirectoryMode)
})
}

func TestDefaultConfig(t *testing.T) {
cluster := ClusterConfig{}.NewCluster()
require.Equal(t, defaultDialTimeout, cluster.DialTimeout())
require.Equal(t, defaultAutoSyncInterval, cluster.AutoSyncInterval())
}
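The new TestDefaultConfig pins down the fallback behaviour: an empty ClusterConfig yields a cluster reporting the package-level defaultDialTimeout and defaultAutoSyncInterval. A complementary usage sketch (again not part of the commit, import path assumed as above) showing that explicitly configured values still take precedence over those defaults:

package main

import (
	"fmt"
	"time"

	etcd "github.com/m3db/m3/src/cluster/client/etcd"
)

func main() {
	// Explicit values override the package defaults...
	withOverrides := etcd.ClusterConfig{
		Zone:             "embedded",
		Endpoints:        []string{"dbnode01:2379"},
		AutoSyncInterval: 10 * time.Minute,
		DialTimeout:      time.Minute,
	}.NewCluster()
	fmt.Println(withOverrides.AutoSyncInterval(), withOverrides.DialTimeout()) // 10m0s 1m0s

	// ...while zero values (e.g. fields left out of the YAML) fall back to the
	// defaults that TestDefaultConfig asserts against.
	withDefaults := etcd.ClusterConfig{
		Zone:      "embedded",
		Endpoints: []string{"dbnode01:2379"},
	}.NewCluster()
	fmt.Println(withDefaults.AutoSyncInterval(), withDefaults.DialTimeout())
}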
1 change: 1 addition & 0 deletions src/cmd/services/m3dbnode/config/config_test.go
@@ -638,6 +638,7 @@ func TestConfiguration(t *testing.T) {
keepAlive: null
tls: null
autoSyncInterval: 0s
dialTimeout: 0s
m3sd:
initTimeout: null
watchWithRevision: 0