Adding config variables to leader_election provider (#3625)
* Adding config variables to leader election process

* Adding Debug message info

* Adding Changelog fragment
gizas authored Nov 13, 2023
1 parent 3155ea9 commit bb5de53
Showing 3 changed files with 44 additions and 3 deletions.
32 changes: 32 additions & 0 deletions changelog/fragments/1699010487-leaderelectionconfig.yaml
@@ -0,0 +1,32 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user’s deployment.
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: enhancement

# Change summary; an 80ish-character description of the change.
summary: Added Kubernetes leader_election provider configuration parameters

# Long description; in case the summary is not enough to describe the change
# this field accommodates a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
#description:

# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
component: elastic-agent

# PR URL; optional; the PR number that added the changeset.
# If not present, it is automatically filled by the tooling, which finds the PR where this changelog fragment was added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
#pr: https://github.com/owner/repo/1234

# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
#issue: https://github.com/owner/repo/1234
@@ -13,10 +13,18 @@ type Config struct {
 	// Name of the leaderelection lease
 	LeaderLease string `config:"leader_lease"`
 
+	// Parameters to configure the election process
+	LeaseDuration int `config:"leader_leaseduration"`
+	RenewDeadline int `config:"leader_renewdeadline"`
+	RetryPeriod   int `config:"leader_retryperiod"`
+
 	KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"`
 }
 
 // InitDefaults initializes the default values for the config.
 func (c *Config) InitDefaults() {
 	c.LeaderLease = "elastic-agent-cluster-leader"
+	c.LeaseDuration = 15
+	c.RenewDeadline = 10
+	c.RetryPeriod = 2
 }
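
For reference, the config tags above become user-facing provider settings, and any key the user omits keeps the InitDefaults value (15, 10, and 2 seconds). A minimal configuration sketch follows; the providers.kubernetes_leader_election placement is an assumption based on this provider's name, not something shown in this commit, and the values are purely illustrative:

providers.kubernetes_leader_election:
  leader_lease: elastic-agent-cluster-leader
  # The three timing values are plain integers interpreted as seconds
  # by the provider (see the time.Second conversion in the next file).
  leader_leaseduration: 30
  leader_renewdeadline: 20
  leader_retryperiod: 5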
@@ -83,12 +83,13 @@ func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProvider
 			},
 		},
 		ReleaseOnCancel: true,
-		LeaseDuration:   15 * time.Second,
-		RenewDeadline:   10 * time.Second,
-		RetryPeriod:     2 * time.Second,
+		LeaseDuration:   time.Duration(p.config.LeaseDuration) * time.Second,
+		RenewDeadline:   time.Duration(p.config.RenewDeadline) * time.Second,
+		RetryPeriod:     time.Duration(p.config.RetryPeriod) * time.Second,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(ctx context.Context) {
 				p.logger.Debugf("leader election lock GAINED, id %v", id)
+				p.logger.Debugf("leader configuration timings: LeaseDuration: %v , RenewDeadline: %v, RetryPeriod: %v", p.leaderElection.LeaseDuration, p.leaderElection.RenewDeadline, p.leaderElection.RetryPeriod)
 				p.startLeading(comm)
 			},
 			OnStoppedLeading: func() {
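
A caveat on exposing these as free-form integers: client-go validates the combination when the elector is built. NewLeaderElector in k8s.io/client-go/tools/leaderelection returns an error unless LeaseDuration is greater than RenewDeadline and RenewDeadline is greater than JitterFactor (1.2) times RetryPeriod. A small pre-flight check in that spirit is sketched below; validateElectionTimings is a hypothetical helper, not code from this commit:

package main

import "fmt"

// validateElectionTimings mirrors the invariants enforced by
// k8s.io/client-go/tools/leaderelection.NewLeaderElector so that a bad
// user config fails fast with a readable message. Hypothetical helper,
// not part of the commit; inputs are seconds, as in the new Config.
func validateElectionTimings(lease, renew, retry int) error {
	if lease < 1 || renew < 1 || retry < 1 {
		return fmt.Errorf("election timings must be at least 1 second each")
	}
	if lease <= renew {
		return fmt.Errorf("leader_leaseduration (%ds) must be greater than leader_renewdeadline (%ds)", lease, renew)
	}
	// client-go applies JitterFactor = 1.2 to RetryPeriod when renewing.
	if float64(renew) <= 1.2*float64(retry) {
		return fmt.Errorf("leader_renewdeadline (%ds) must be greater than 1.2 * leader_retryperiod (%ds)", renew, retry)
	}
	return nil
}

func main() {
	// The InitDefaults values (15, 10, 2) pass all three checks.
	fmt.Println(validateElectionTimings(15, 10, 2)) // <nil>
	// Swapped lease/renew values are rejected before reaching client-go.
	fmt.Println(validateElectionTimings(10, 15, 2))
}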
