diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 3c99a4177a33..404eb0fa1e98 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -84,6 +84,7 @@ https://github.com/elastic/beats/compare/v6.5.0...6.x[Check the HEAD diff] - Dissect will now flag event on parsing error. {pull}8751[8751] - Added the `redirect_stderr` option that allows panics to be logged to log files. {pull}8430[8430] - Add cache.ttl to add_host_metadata. {pull}9359[9359] +- Add support for index lifecycle management (beta). {pull}7963[7963] *Auditbeat* diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index b567592f4136..c28191f7f1ec 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -352,6 +352,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "auditbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/auditbeat/auditbeat.yml b/auditbeat/auditbeat.yml index 3e0e41e4c037..79a1b1cb9028 100644 --- a/auditbeat/auditbeat.yml +++ b/auditbeat/auditbeat.yml @@ -122,6 +122,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index af4d11a305a6..5e750bb54a15 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -1031,6 +1031,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "filebeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index 5de1a4d1e55c..78467764cc94 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -149,6 +149,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 6e901792cf76..5235cea33093 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -490,6 +490,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "heartbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index da3f3d9aa0c1..1e20e85e6b3b 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -96,6 +96,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials.
#protocol: "https" #username: "elastic" diff --git a/journalbeat/journalbeat.reference.yml b/journalbeat/journalbeat.reference.yml index 87c24f5a75a5..6dc6ae01aae1 100644 --- a/journalbeat/journalbeat.reference.yml +++ b/journalbeat/journalbeat.reference.yml @@ -303,6 +303,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "journalbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/journalbeat/journalbeat.yml b/journalbeat/journalbeat.yml index ff503f25f8a5..b8ab139f8f88 100644 --- a/journalbeat/journalbeat.yml +++ b/journalbeat/journalbeat.yml @@ -124,6 +124,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/libbeat/_meta/config.reference.yml b/libbeat/_meta/config.reference.yml index e0e604346fb5..e49f917cb522 100644 --- a/libbeat/_meta/config.reference.yml +++ b/libbeat/_meta/config.reference.yml @@ -245,6 +245,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "beat-index-prefix" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/libbeat/_meta/config.yml b/libbeat/_meta/config.yml index 3559eb140321..0847a93afd14 100644 --- a/libbeat/_meta/config.yml +++ b/libbeat/_meta/config.yml @@ -66,6 +66,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enable ILM (beta) to use index lifecycle management instead of daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/libbeat/cmd/export.go b/libbeat/cmd/export.go index e9d6015992da..07f9cd16da0a 100644 --- a/libbeat/cmd/export.go +++ b/libbeat/cmd/export.go @@ -33,6 +33,7 @@ func genExportCmd(settings instance.Settings, name, idxPrefix, beatVersion strin exportCmd.AddCommand(export.GenExportConfigCmd(settings, name, idxPrefix, beatVersion)) exportCmd.AddCommand(export.GenTemplateConfigCmd(settings, name, idxPrefix, beatVersion)) exportCmd.AddCommand(export.GenDashboardCmd(name, idxPrefix, beatVersion)) + exportCmd.AddCommand(export.GenGetILMPolicyCmd()) return exportCmd } diff --git a/libbeat/cmd/export/ilm_policy.go b/libbeat/cmd/export/ilm_policy.go new file mode 100644 index 000000000000..a9e4542fc29e --- /dev/null +++ b/libbeat/cmd/export/ilm_policy.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package export + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/elastic/beats/libbeat/cmd/instance" +) + +// GenGetILMPolicyCmd is the command used to export the ILM policy. +func GenGetILMPolicyCmd() *cobra.Command { + genGetILMPolicyCmd := &cobra.Command{ + Use: "ilm-policy", + Short: "Export ILM policy", + Run: func(cmd *cobra.Command, args []string) { + fmt.Println(instance.ILMPolicy.StringToPrint()) + }, + } + + return genGetILMPolicyCmd +} diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index b9c09a8a9e76..825409d42e6f 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -105,6 +105,9 @@ type beatConfig struct { Dashboards *common.Config `config:"setup.dashboards"` Template *common.Config `config:"setup.template"` Kibana *common.Config `config:"setup.kibana"` + + // ILM Config options + ILM *common.Config `config:"output.elasticsearch.ilm"` } var ( @@ -430,7 +433,7 @@ func (b *Beat) TestConfig(bt beat.Creator) error { } // Setup registers ES index template, kibana dashboards, ml jobs and pipelines. -func (b *Beat) Setup(bt beat.Creator, template, setupDashboards, machineLearning, pipelines bool) error { +func (b *Beat) Setup(bt beat.Creator, template, setupDashboards, machineLearning, pipelines, policy bool) error { return handleError(func() error { err := b.Init() if err != nil { @@ -509,6 +512,13 @@ func (b *Beat) Setup(bt beat.Creator, template, setupDashboards, machineLearning fmt.Println("Loaded Ingest pipelines") } + if policy { + if err := b.loadILMPolicy(); err != nil { + return err + } + fmt.Println("Loaded Index Lifecycle Management (ILM) policy") + } + return nil }()) } @@ -724,11 +734,11 @@ func (b *Beat) loadDashboards(ctx context.Context, force bool) error { // the elasticsearch output. It is important that the registration happens before // the publisher is created.
func (b *Beat) registerTemplateLoading() error { - var cfg template.TemplateConfig + var templateCfg template.TemplateConfig // Check if outputting to file is enabled, and output to file if it is if b.Config.Template.Enabled() { - err := b.Config.Template.Unpack(&cfg) + err := b.Config.Template.Unpack(&templateCfg) if err != nil { return fmt.Errorf("unpacking template config fails: %v", err) } @@ -746,8 +756,82 @@ func (b *Beat) registerTemplateLoading() error { return err } - if esCfg.Index != "" && (cfg.Name == "" || cfg.Pattern == "") && (b.Config.Template == nil || b.Config.Template.Enabled()) { - return fmt.Errorf("setup.template.name and setup.template.pattern have to be set if index name is modified.") + if esCfg.Index != "" && + (templateCfg.Name == "" || templateCfg.Pattern == "") && + (b.Config.Template == nil || b.Config.Template.Enabled()) { + return errors.New("setup.template.name and setup.template.pattern have to be set if index name is modified") + } + + if b.Config.ILM.Enabled() { + cfgwarn.Beta("Index lifecycle management is enabled, which is in beta.") + + ilmCfg, err := getILMConfig(b) + if err != nil { + return err + } + + // In case no template settings are set, config must be created + if b.Config.Template == nil { + b.Config.Template = common.NewConfig() + } + // Template name and pattern can't be configured when using ILM + logp.Info("Set setup.template.name to '%s' as ILM is enabled.", ilmCfg.RolloverAlias) + err = b.Config.Template.SetString("name", -1, ilmCfg.RolloverAlias) + if err != nil { + return errw.Wrap(err, "error setting setup.template.name") + } + pattern := fmt.Sprintf("%s-*", ilmCfg.RolloverAlias) + logp.Info("Set setup.template.pattern to '%s' as ILM is enabled.", pattern) + err = b.Config.Template.SetString("pattern", -1, pattern) + if err != nil { + return errw.Wrap(err, "error setting setup.template.pattern") + } + + // rollover_alias and lifecycle.name can't be configured and will be overwritten + logp.Info("Set settings.index.lifecycle.rollover_alias in template to %s as ILM is enabled.", ilmCfg.RolloverAlias) + err = b.Config.Template.SetString("settings.index.lifecycle.rollover_alias", -1, ilmCfg.RolloverAlias) + if err != nil { + return errw.Wrap(err, "error setting settings.index.lifecycle.rollover_alias") + } + logp.Info("Set settings.index.lifecycle.name in template to %s as ILM is enabled.", ILMPolicyName) + err = b.Config.Template.SetString("settings.index.lifecycle.name", -1, ILMPolicyName) + if err != nil { + return errw.Wrap(err, "error setting settings.index.lifecycle.name") + } + + // Set the ingestion index to the rollover alias + logp.Info("Set output.elasticsearch.index to '%s' as ILM is enabled.", ilmCfg.RolloverAlias) + esCfg.Index = ilmCfg.RolloverAlias + err = b.Config.Output.Config().SetString("index", -1, ilmCfg.RolloverAlias) + if err != nil { + return errw.Wrap(err, "error setting output.elasticsearch.index") + } + + writeAliasCallback, err := b.writeAliasLoadingCallback() + if err != nil { + return err + } + + // Connect to the configured Elasticsearch output so the ILM prerequisites can be checked before the write alias callback is registered + esConfig := b.Config.Output.Config() + + // Check that the Elasticsearch version supports ILM and that the feature is available and enabled + esClient, err := elasticsearch.NewConnectedClient(esConfig) + if err != nil { + return err + } + + err = checkElasticsearchVersionIlm(esClient) + if err != nil { + return err + } + + err = checkILMFeatureEnabled(esClient) + if err != nil { + return err + } + + elasticsearch.RegisterConnectCallback(writeAliasCallback) } if b.Config.Template == nil || (b.Config.Template !=
nil && b.Config.Template.Enabled()) { @@ -759,6 +843,8 @@ func (b *Beat) registerTemplateLoading() error { return err } elasticsearch.RegisterConnectCallback(callback) + } else if b.Config.ILM.Enabled() { + return errors.New("templates cannot be disabled when using ILM") } } diff --git a/libbeat/cmd/instance/ilm.go b/libbeat/cmd/instance/ilm.go new file mode 100644 index 000000000000..0097e658b6c4 --- /dev/null +++ b/libbeat/cmd/instance/ilm.go @@ -0,0 +1,211 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package instance + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/outputs/elasticsearch" +) + +type ilmConfig struct { + RolloverAlias string `config:"ilm.rollover_alias"` + Pattern string `config:"ilm.pattern"` +} + +// ILMPolicy contains the default policy +var ILMPolicy = common.MapStr{ + "policy": common.MapStr{ + "phases": common.MapStr{ + "hot": common.MapStr{ + "actions": common.MapStr{ + "rollover": common.MapStr{ + "max_size": "50gb", + "max_age": "30d", + }, + }, + }, + }, + }, +} + +const ( + // ILMPolicyName is the default policy name + ILMPolicyName = "beats-default-policy" + // ILMDefaultPattern is the default pattern + ILMDefaultPattern = "{now/d}-000001" +) + +// Build and return a callback to load ILM write alias +func (b *Beat) writeAliasLoadingCallback() (func(esClient *elasticsearch.Client) error, error) { + callback := func(esClient *elasticsearch.Client) error { + if b.Config.ILM == nil { + b.Config.ILM = common.NewConfig() + } + + config, err := getILMConfig(b) + if err != nil { + return err + } + + // Escape the pattern, as it may contain date math + pattern := url.PathEscape(config.Pattern) + // This always assumes a date math pattern by surrounding it with <...> + firstIndex := fmt.Sprintf("%%3C%s-%s%%3E", config.RolloverAlias, pattern) + + // Check if alias already exists + status, res, err := esClient.Request("HEAD", "/_alias/"+config.RolloverAlias, "", nil, nil) + if err != nil && status != 404 { + logp.Err("Failed to check for alias: %s: %+v", err, string(res)) + return errors.Wrap(err, "failed to check for alias") + } + if status == 200 { + logp.Info("Write alias already exists") + return nil + } + + body := common.MapStr{ + "aliases": common.MapStr{ + config.RolloverAlias: common.MapStr{ + "is_write_index": true, + }, + }, + } + + // Create alias with write index + code, res, err := esClient.Request("PUT", "/"+firstIndex, "", nil, body) + if code == 400 { + logp.Err("Error creating alias with write index. As return code is 400, assuming it already exists: %s, %s", err, string(res)) + return nil + } else if err != nil { + logp.Err("Error creating alias with write index: %s, %s", err, string(res)) + return errors.Wrap(err, "failed to create write alias: "+string(res)) + } + + logp.Info("Alias with write index created: %s", firstIndex) + + return nil + } + + return callback, nil +} + +func (b *Beat) loadILMPolicy() error { + esClient, err := getElasticsearchClient(b) + if err != nil { + return err + } + + _, _, err = esClient.Request("PUT", "/_ilm/policy/"+ILMPolicyName, "", nil, ILMPolicy) + return err +} + +func getElasticsearchClient(b *Beat) (*elasticsearch.Client, error) { + outCfg := b.Config.Output + if outCfg.Name() != "elasticsearch" { + return nil, fmt.Errorf("policy loading requested but the Elasticsearch output is not configured/enabled") + } + + esConfig := outCfg.Config() + + return elasticsearch.NewConnectedClient(esConfig) +} + +func loadConfigWithDefaults(config *ilmConfig, b *Beat) { + if config.RolloverAlias == "" { + config.RolloverAlias = fmt.Sprintf("%s-%s", b.Info.Beat, b.Info.Version) + } + + if config.Pattern == "" { + config.Pattern = ILMDefaultPattern + } +} + +func checkElasticsearchVersionIlm(client *elasticsearch.Client) error { + esVer := client.GetVersion() + esV, err := common.NewVersion(esVer) + if err != nil { + return err + } + + requiredVersion, err := common.NewVersion("6.6.0") + if err != nil { + return err + } + + if esV.LessThan(requiredVersion) { + return fmt.Errorf("ILM requires at least Elasticsearch 6.6.0. Used version: %s", esV.String()) + } + + return nil +} + +func checkILMFeatureEnabled(client *elasticsearch.Client) error { + code, body, err := client.Request("GET", "/_xpack", "", nil, nil) + + // If we get a 400, it's assumed to be the OSS version of Elasticsearch + if code == 400 { + return fmt.Errorf("ILM feature is not available in this Elasticsearch version") + } + if err != nil { + return err + } + + var response struct { + Features struct { + ILM struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + } `json:"ilm"` + } `json:"features"` + } + + err = json.Unmarshal(body, &response) + if err != nil { + return fmt.Errorf("failed to parse JSON response: %v", err) + } + + if !response.Features.ILM.Available { + return fmt.Errorf("ILM feature is not available in Elasticsearch") + } + + if !response.Features.ILM.Enabled { + return fmt.Errorf("ILM feature is not enabled in Elasticsearch") + } + + return nil +} + +func getILMConfig(b *Beat) (*ilmConfig, error) { + config := &ilmConfig{} + err := b.Config.Output.Config().Unpack(config) + if err != nil { + return nil, errors.Wrap(err, "problem unpacking ilm configs") + } + + loadConfigWithDefaults(config, b) + + return config, nil +} diff --git a/libbeat/cmd/setup.go b/libbeat/cmd/setup.go index 2b4f5cd326be..fd096a343707 100644 --- a/libbeat/cmd/setup.go +++ b/libbeat/cmd/setup.go @@ -37,6 +37,7 @@ func genSetupCmd(name, idxPrefix, version string, beatCreator beat.Creator) *cob * Kibana dashboards (where available). * ML jobs (where available). * Ingest pipelines (where available). + * ILM policy (for Elasticsearch 6.6 and newer).
`, Run: func(cmd *cobra.Command, args []string) { beat, err := instance.NewBeat(name, idxPrefix, version) @@ -49,16 +50,18 @@ func genSetupCmd(name, idxPrefix, version string, beatCreator beat.Creator) *cob dashboards, _ := cmd.Flags().GetBool("dashboards") machineLearning, _ := cmd.Flags().GetBool("machine-learning") pipelines, _ := cmd.Flags().GetBool("pipelines") + policy, _ := cmd.Flags().GetBool("ilm-policy") // No flags: setup all - if !template && !dashboards && !machineLearning && !pipelines { + if !template && !dashboards && !machineLearning && !pipelines && !policy { template = true dashboards = true machineLearning = true pipelines = true + policy = true } - if err = beat.Setup(beatCreator, template, dashboards, machineLearning, pipelines); err != nil { + if err = beat.Setup(beatCreator, template, dashboards, machineLearning, pipelines, policy); err != nil { os.Exit(1) } }, @@ -68,6 +71,7 @@ func genSetupCmd(name, idxPrefix, version string, beatCreator beat.Creator) *cob setup.Flags().Bool("dashboards", false, "Setup dashboards") setup.Flags().Bool("machine-learning", false, "Setup machine learning job configurations") setup.Flags().Bool("pipelines", false, "Setup Ingest pipelines") + setup.Flags().Bool("ilm-policy", false, "Setup ILM policy") return &setup } diff --git a/libbeat/docs/command-reference.asciidoc b/libbeat/docs/command-reference.asciidoc index e30cabe3b5d6..577a7ab58300 100644 --- a/libbeat/docs/command-reference.asciidoc +++ b/libbeat/docs/command-reference.asciidoc @@ -184,6 +184,10 @@ If {kib} is not running on `localhost:5061`, you must also adjust the Exports the index template to stdout. You can specify the `--es.version` and `--index` flags to further define what gets exported. +[[ilm-policy-subcommand]] +*`ilm-policy`*:: +Exports ILM policy to stdout. 
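The exported policy is the `ILMPolicy` map defined in `libbeat/cmd/instance/ilm.go` above; its printed JSON should look roughly like the following sketch (key order and whitespace depend on `StringToPrint()`):

{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "30d",
            "max_size": "50gb"
          }
        }
      }
    }
  }
}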
+ *FLAGS* *`--es.version VERSION`*:: diff --git a/libbeat/tests/system/config/libbeat.yml.j2 b/libbeat/tests/system/config/libbeat.yml.j2 index a8e494e63eb9..839836be2875 100644 --- a/libbeat/tests/system/config/libbeat.yml.j2 +++ b/libbeat/tests/system/config/libbeat.yml.j2 @@ -66,6 +66,9 @@ output: {% if elasticsearch.index %} index: {{elasticsearch.index}} {% endif %} + {% if elasticsearch.ilm %} + ilm.enabled: {{elasticsearch.ilm}} + {% endif %} {%- endif %} {% if logstash %} diff --git a/libbeat/tests/system/test_ilm.py b/libbeat/tests/system/test_ilm.py new file mode 100644 index 000000000000..305f6f4e2fe7 --- /dev/null +++ b/libbeat/tests/system/test_ilm.py @@ -0,0 +1,253 @@ +from base import BaseTest +import os +from elasticsearch import Elasticsearch, TransportError +from nose.plugins.attrib import attr +import unittest +import shutil +import logging +import datetime + +INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) + + +class Test(BaseTest): + + def setUp(self): + super(BaseTest, self).setUp() + + self.elasticsearch_url = self.get_elasticsearch_url() + print("Using elasticsearch: {}".format(self.elasticsearch_url)) + self.es = Elasticsearch([self.elasticsearch_url]) + self.alias_name = "mockbeat-9.9.9" + self.policy_name = "beats-default-policy" + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("elasticsearch").setLevel(logging.ERROR) + + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_enabled(self): + """ + Test ilm enabled + """ + + self.render_config_template( + elasticsearch={ + "hosts": self.get_elasticsearch_url(), + "ilm.enabled": True, + }, + ) + + self.clean() + + proc = self.start_beat() + self.wait_until(lambda: self.log_contains("mockbeat start running.")) + self.wait_until(lambda: self.log_contains("Set setup.template.name")) + self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) + proc.check_kill_and_wait() + + # Check if template is loaded with settings + template = self.es.transport.perform_request('GET', '/_template/' + self.alias_name) + + assert template[self.alias_name]["settings"]["index"]["lifecycle"]["name"] == "beats-default-policy" + assert template[self.alias_name]["settings"]["index"]["lifecycle"]["rollover_alias"] == self.alias_name + + # Make sure the correct index + alias was created + alias = self.es.transport.perform_request('GET', '/_alias/' + self.alias_name) + d = datetime.datetime.now() + now = d.strftime("%Y.%m.%d") + index_name = self.alias_name + "-" + now + "-000001" + assert index_name in alias + assert alias[index_name]["aliases"][self.alias_name]["is_write_index"] == True + + # Asserts that data is actually written to the ILM indices + self.wait_until(lambda: self.es.transport.perform_request( + 'GET', '/' + index_name + '/_search')["hits"]["total"] > 0) + + data = self.es.transport.perform_request('GET', '/' + index_name + '/_search') + assert data["hits"]["total"] > 0 + + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_rollover_alias(self): + """ + Test ilm rollover alias setting + """ + + alias_name = "foo" + self.render_config_template( + elasticsearch={ + "hosts": self.get_elasticsearch_url(), + "ilm.enabled": True, + "ilm.pattern": "1", + "ilm.rollover_alias": alias_name + }, + ) + + self.clean(alias_name=alias_name) + + proc = self.start_beat() + self.wait_until(lambda: self.log_contains("mockbeat start running.")) + self.wait_until(lambda: 
self.log_contains("Set setup.template.name")) + self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) + proc.check_kill_and_wait() + + # Make sure the correct index + alias was created + print '/_alias/' + alias_name + logfile = self.beat_name + ".log" + with open(os.path.join(self.working_dir, logfile), "r") as f: + print f.read() + + alias = self.es.transport.perform_request('GET', '/_alias/' + alias_name) + index_name = alias_name + "-1" + assert index_name in alias + + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_pattern(self): + """ + Test ilm pattern setting + """ + + self.render_config_template( + elasticsearch={ + "hosts": self.get_elasticsearch_url(), + "ilm.enabled": True, + "ilm.pattern": "1" + }, + ) + + self.clean() + + proc = self.start_beat() + self.wait_until(lambda: self.log_contains("mockbeat start running.")) + self.wait_until(lambda: self.log_contains("Set setup.template.name")) + self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) + proc.check_kill_and_wait() + + # Make sure the correct index + alias was created + print '/_alias/' + self.alias_name + logfile = self.beat_name + ".log" + with open(os.path.join(self.working_dir, logfile), "r") as f: + print f.read() + + alias = self.es.transport.perform_request('GET', '/_alias/' + self.alias_name) + index_name = self.alias_name + "-1" + assert index_name in alias + + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_pattern_date(self): + """ + Test ilm pattern with date inside + """ + + self.render_config_template( + elasticsearch={ + "hosts": self.get_elasticsearch_url(), + "ilm.enabled": True, + "ilm.pattern": "'{now/d}'" + }, + ) + + self.clean() + + proc = self.start_beat() + self.wait_until(lambda: self.log_contains("mockbeat start running.")) + self.wait_until(lambda: self.log_contains("Set setup.template.name")) + self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) + proc.check_kill_and_wait() + + # Make sure the correct index + alias was created + print '/_alias/' + self.alias_name + logfile = self.beat_name + ".log" + with open(os.path.join(self.working_dir, logfile), "r") as f: + print f.read() + + # Make sure the correct index + alias was created + alias = self.es.transport.perform_request('GET', '/_alias/' + self.alias_name) + d = datetime.datetime.now() + now = d.strftime("%Y.%m.%d") + index_name = self.alias_name + "-" + now + assert index_name in alias + assert alias[index_name]["aliases"][self.alias_name]["is_write_index"] == True + + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_setup_ilm_policy(self): + """ + Test ilm policy setup + """ + + self.clean() + + shutil.copy(self.beat_path + "/_meta/config.yml", + os.path.join(self.working_dir, "libbeat.yml")) + shutil.copy(self.beat_path + "/fields.yml", + os.path.join(self.working_dir, "fields.yml")) + + exit_code = self.run_beat( + logging_args=["-v", "-d", "*"], + extra_args=["setup", + "--ilm-policy", + "-path.config", self.working_dir, + "-E", "output.elasticsearch.hosts=['" + self.get_elasticsearch_url() + "']"], + config="libbeat.yml") + + assert exit_code == 0 + + policy = self.es.transport.perform_request('GET', "/_ilm/policy/" + self.policy_name) + assert self.policy_name in policy + + @attr('integration') + def test_export_ilm_policy(self): + """ + Test ilm policy export + """ + + self.clean() + + 
shutil.copy(self.beat_path + "/_meta/config.yml", + os.path.join(self.working_dir, "libbeat.yml")) + shutil.copy(self.beat_path + "/fields.yml", + os.path.join(self.working_dir, "fields.yml")) + + exit_code = self.run_beat( + logging_args=["-v", "-d", "*"], + extra_args=["export", + "ilm-policy", + ], + config="libbeat.yml") + + assert exit_code == 0 + + assert self.log_contains('"max_age": "30d"') + assert self.log_contains('"max_size": "50gb"') + + def clean(self, alias_name=""): + + if alias_name == "": + alias_name = self.alias_name + + # Delete existing indices and aliases with it policy + try: + self.es.transport.perform_request('DELETE', "/" + alias_name + "*") + except: + pass + + # Delete any existing policy + try: + self.es.transport.perform_request('DELETE', "/_ilm/policy/" + self.policy_name) + except: + pass + + # Delete templates + try: + self.es.transport.perform_request('DELETE', "/_template/mockbeat*") + except: + pass + + # Delete indices + try: + self.es.transport.perform_request('DELETE', "/foo*,mockbeat*") + except: + pass diff --git a/libbeat/tests/system/test_template.py b/libbeat/tests/system/test_template.py index f91d774d8418..66a67f4c0a0f 100644 --- a/libbeat/tests/system/test_template.py +++ b/libbeat/tests/system/test_template.py @@ -21,7 +21,7 @@ def test_index_modified(self): assert exit_code == 1 assert self.log_contains( - "setup.template.name and setup.template.pattern have to be set if index name is modified.") is True + "setup.template.name and setup.template.pattern have to be set if index name is modified") is True def test_index_not_modified(self): """ @@ -48,7 +48,7 @@ def test_index_modified_no_pattern(self): assert exit_code == 1 assert self.log_contains( - "setup.template.name and setup.template.pattern have to be set if index name is modified.") is True + "setup.template.name and setup.template.pattern have to be set if index name is modified") is True def test_index_modified_no_name(self): """ @@ -63,7 +63,7 @@ def test_index_modified_no_name(self): assert exit_code == 1 assert self.log_contains( - "setup.template.name and setup.template.pattern have to be set if index name is modified.") is True + "setup.template.name and setup.template.pattern have to be set if index name is modified") is True def test_index_with_pattern_name(self): """ diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 59e436fa1a86..96ffbeaef864 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -930,6 +930,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "metricbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 2ff8c47f6e42..aa4765e82205 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -93,6 +93,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. 
#protocol: "https" #username: "elastic" diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index ed7b73f965a4..93f144d76a3c 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -726,6 +726,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "packetbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index 284af2be4a50..36b2d1142955 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -176,6 +176,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index be1ed9b0e7ba..dff20f3b4d36 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.1' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:6.5.0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] retries: 300 @@ -16,7 +16,7 @@ services: - "xpack.security.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:6.5.0-SNAPSHOT + image: docker.elastic.co/logstash/logstash:6.6.0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -26,7 +26,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:6.5.0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:6.6.0-SNAPSHOT healthcheck: test: ["CMD-SHELL", 'python -c ''import urllib, json; response = urllib.urlopen("http://localhost:5601/api/status"); data = json.loads(response.read()); exit(1) if data["status"]["overall"]["state"] != "green" else exit(0);'''] retries: 600 diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index c1ffa865c325..9e9a8a9657fb 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -274,6 +274,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "winlogbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index 3b26bf8d3a93..7661b770cda0 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -97,6 +97,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. 
#protocol: "https" #username: "elastic" diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index fb80e4aca50d..e73273f10b7a 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -1035,6 +1035,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "filebeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml index 5de1a4d1e55c..78467764cc94 100644 --- a/x-pack/filebeat/filebeat.yml +++ b/x-pack/filebeat/filebeat.yml @@ -149,6 +149,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 65cc9e7cb21c..88c5bac11ce0 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -339,6 +339,11 @@ output.elasticsearch: # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "functionbeat" + #ilm.pattern: "{now/d}-000001" + # Set gzip compression level. #compression_level: 0 diff --git a/x-pack/functionbeat/functionbeat.yml b/x-pack/functionbeat/functionbeat.yml index 9f984ba18860..f584fa8f4afa 100644 --- a/x-pack/functionbeat/functionbeat.yml +++ b/x-pack/functionbeat/functionbeat.yml @@ -157,6 +157,9 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + # Optional protocol and basic auth credentials. #protocol: "https" #username: "elastic" diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml new file mode 100644 index 000000000000..f6b7ce9cf927 --- /dev/null +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -0,0 +1,1833 @@ +########################## Metricbeat Configuration ########################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see metricbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/metricbeat/index.html + +#============================ Config Reloading =============================== + +# Config reloading allows to dynamically load modules. Each file which is +# monitored must contain one or multiple modules as a list. +metricbeat.config.modules: + + # Glob pattern for configuration reloading + path: ${path.config}/conf.d/*.yml + + # Period on which files under path should be checked for changes + reload.period: 10s + + # Set to true to enable config reloading + reload.enabled: false + +# Maximum amount of time to randomly delay the start of a metricset. Use 0 to +# disable startup delay. 
+metricbeat.max_start_delay: 10s + +#============================== Autodiscover =================================== + +# Autodiscover allows you to detect changes in the system and spawn new modules +# as they happen. + +#metricbeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: etcd +# config: +# - module: etcd +# metricsets: ["leader", "self", "store"] +# period: 10s +# hosts: ["${host}:2379"] + +#========================== Modules configuration ============================= +metricbeat.modules: + +#-------------------------------- System Module -------------------------------- +- module: system + metricsets: + - cpu # CPU usage + - load # CPU load averages + - memory # Memory usage + - network # Network IO + - process # Per process metrics + - process_summary # Process summary + - uptime # System Uptime + #- core # Per CPU core usage + #- diskio # Disk IO + #- filesystem # File system usage for each mountpoint + #- fsstat # File system summary metrics + #- raid # Raid + #- socket # Sockets and connection info (linux only) + enabled: true + period: 10s + processes: ['.*'] + + # Configure the metric types that are included by these metricsets. + cpu.metrics: ["percentages"] # The other available options are normalized_percentages and ticks. + core.metrics: ["percentages"] # The other available option is ticks. + + # A list of filesystem types to ignore. The filesystem metricset will not + # collect data from filesystems matching any of the specified types, and + # fsstats will not include data from these filesystems in its summary stats. + # If not set, types associated to virtual filesystems are automatically + # added when this information is available in the system (e.g. the list of + # `nodev` types in `/proc/filesystem`). + #filesystem.ignore_types: [] + + # These options allow you to filter out all processes that are not + # in the top N by CPU or memory, in order to reduce the number of documents created. + # If both the `by_cpu` and `by_memory` options are used, the union of the two sets + # is included. + #process.include_top_n: + + # Set to false to disable this feature and include all processes + #enabled: true + + # How many processes to include from the top by CPU. The processes are sorted + # by the `system.process.cpu.total.pct` field. + #by_cpu: 0 + + # How many processes to include from the top by memory. The processes are sorted + # by the `system.process.memory.rss.bytes` field. + #by_memory: 0 + + # If false, cmdline of a process is not cached. + #process.cmdline.cache.enabled: true + + # Enable collection of cgroup metrics from processes on Linux. + #process.cgroups.enabled: true + + # A list of regular expressions used to whitelist environment variables + # reported with the process metricset's events. Defaults to empty. + #process.env.whitelist: [] + + # Include the cumulative CPU tick values with the process metrics. Defaults + # to false. + #process.include_cpu_ticks: false + + # Raid mount point to monitor + #raid.mount_point: '/' + + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. 
+ #socket.reverse_lookup.enabled: false + #socket.reverse_lookup.success_ttl: 60s + #socket.reverse_lookup.failure_ttl: 60s + + # Diskio configurations + #diskio.include_devices: [] + +#------------------------------ Aerospike Module ------------------------------ +- module: aerospike + metricsets: ["namespace"] + enabled: true + period: 10s + hosts: ["localhost:3000"] + +#-------------------------------- Apache Module -------------------------------- +- module: apache + metricsets: ["status"] + period: 10s + enabled: true + + # Apache hosts + hosts: ["http://127.0.0.1"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + + # Username of hosts. Empty by default + #username: username + + # Password of hosts. Empty by default + #password: password + +#--------------------------------- Ceph Module --------------------------------- +- module: ceph + metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] + period: 10s + hosts: ["localhost:5000"] + enabled: true + +#------------------------------ Couchbase Module ------------------------------ +- module: couchbase + metricsets: ["bucket", "cluster", "node"] + period: 10s + hosts: ["localhost:8091"] + enabled: true + +#-------------------------------- Docker Module -------------------------------- +- module: docker + metricsets: + - "container" + - "cpu" + - "diskio" + - "healthcheck" + - "info" + #- "image" + - "memory" + - "network" + hosts: ["unix:///var/run/docker.sock"] + period: 10s + enabled: true + + # If set to true, replace dots in labels with `_`. + #labels.dedot: false + + # If set to true, collects metrics per core. + #cpu.cores: true + + # To connect to Docker over TLS you must specify a client and CA certificate. + #ssl: + #certificate_authority: "/etc/pki/root/ca.pem" + #certificate: "/etc/pki/client/cert.pem" + #key: "/etc/pki/client/cert.key" + +#------------------------------ Dropwizard Module ------------------------------ +- module: dropwizard + metricsets: ["collector"] + period: 10s + hosts: ["localhost:8080"] + metrics_path: /metrics/metrics + namespace: example + enabled: true + +#---------------------------- Elasticsearch Module ---------------------------- +- module: elasticsearch + metricsets: + - node + - node_stats + #- index + #- index_recovery + #- index_summary + #- shard + #- ml_job + period: 10s + hosts: ["http://localhost:9200"] + #username: "elastic" + #password: "changeme" + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Set to false to fetch all entries + #index_recovery.active_only: true + +#------------------------------ Envoyproxy Module ------------------------------ +- module: envoyproxy + metricsets: ["server"] + period: 10s + hosts: ["localhost:9901"] + +#--------------------------------- Etcd Module --------------------------------- +- module: etcd + metricsets: ["leader", "self", "store"] + period: 10s + hosts: ["localhost:2379"] + +#--------------------------------- Foo Module --------------------------------- +- module: foo + metricsets: ["bar"] + enabled: false + period: 10s + hosts: ["localhost"] + + +#-------------------------------- Golang Module -------------------------------- +- module: golang + #metricsets: + # - expvar + # - heap + period: 10s + hosts: ["localhost:6060"] + heap.path: "/debug/vars" + expvar: + namespace: "example" + path: "/debug/vars" + +#------------------------------- Graphite Module ------------------------------- +- module: graphite + metricsets: ["server"] + enabled: true + + # 
Host address to listen on. Default localhost. + #host: localhost + + # Listening port. Default 2003. + #port: 2003 + + # Protocol to listen on. This can be udp or tcp. Default udp. + #protocol: "udp" + + # Receive buffer size in bytes + #receive_buffer_size: 1024 + + #templates: + # - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats + # namespace: "test" + # template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash + # delimiter: "_" + + +#------------------------------- HAProxy Module ------------------------------- +- module: haproxy + metricsets: ["info", "stat"] + period: 10s + hosts: ["tcp://127.0.0.1:14567"] + enabled: true + +#--------------------------------- HTTP Module --------------------------------- +- module: http + #metricsets: + # - json + period: 10s + hosts: ["localhost:80"] + namespace: "json_namespace" + path: "/" + #body: "" + #method: "GET" + #username: "user" + #password: "secret" + #request.enabled: false + #response.enabled: false + #json.is_array: false + #dedot.enabled: false + +- module: http + #metricsets: + # - server + host: "localhost" + port: "8080" + enabled: false + #paths: + # - path: "/foo" + # namespace: "foo" + # fields: # added to the the response in root. overwrites existing fields + # key: "value" + +#------------------------------- Jolokia Module ------------------------------- +- module: jolokia + #metricsets: ["jmx"] + period: 10s + hosts: ["localhost"] + namespace: "metrics" + #path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" + #username: "user" + #password: "secret" + jmx.mappings: + #- mbean: 'java.lang:type=Runtime' + # attributes: + # - attr: Uptime + # field: uptime + #- mbean: 'java.lang:type=Memory' + # attributes: + # - attr: HeapMemoryUsage + # field: memory.heap_usage + # - attr: NonHeapMemoryUsage + # field: memory.non_heap_usage + # GC Metrics - this depends on what is available on your JVM + #- mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep' + # attributes: + # - attr: CollectionTime + # field: gc.cms_collection_time + # - attr: CollectionCount + # field: gc.cms_collection_count + + jmx.application: + jmx.instance: + +#-------------------------------- Kafka Module -------------------------------- +- module: kafka + metricsets: ["consumergroup", "partition"] + period: 10s + hosts: ["localhost:9092"] + enabled: true + + #client_id: metricbeat + #retries: 3 + #backoff: 250ms + + # List of Topics to query metadata for. If empty, all topics will be queried. + #topics: [] + + # Optional SSL. By default is off. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # SASL authentication + #username: "" + #password: "" + +#-------------------------------- Kibana Module -------------------------------- +- module: kibana + metricsets: ["status"] + period: 10s + hosts: ["localhost:5601"] + basepath: "" + enabled: true + +#------------------------------ Kubernetes Module ------------------------------ +# Node metrics, from kubelet: +- module: kubernetes + metricsets: + - container + - node + - pod + - system + - volume + period: 10s + hosts: ["localhost:10255"] + enabled: true + #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #ssl.certificate: "/etc/pki/client/cert.pem" + #ssl.key: "/etc/pki/client/cert.key" + + # Enriching parameters: + add_metadata: true + in_cluster: true + # When used outside the cluster: + #host: node_name + #kube_config: ~/.kube/config + +# State metrics from kube-state-metrics service: +- module: kubernetes + enabled: true + metricsets: + - state_node + - state_deployment + - state_replicaset + - state_statefulset + - state_pod + - state_container + period: 10s + hosts: ["kube-state-metrics:8080"] + + # Enriching parameters: + add_metadata: true + in_cluster: true + # When used outside the cluster: + #host: node_name + #kube_config: ~/.kube/config + +# Kubernetes events +- module: kubernetes + enabled: true + metricsets: + - event + +# Kubernetes API server +- module: kubernetes + enabled: true + metricsets: + - apiserver + hosts: ["https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"] + +#--------------------------------- Kvm Module --------------------------------- +- module: kvm + metricsets: ["dommemstat"] + enabled: true + period: 10s + hosts: ["unix:///var/run/libvirt/libvirt-sock"] + # For remote hosts, setup network access in libvirtd.conf + # and use the tcp scheme: + # hosts: [ "tcp://:16509" ] + + # Timeout to connect to Libvirt server + #timeout: 1s + +#------------------------------- Logstash Module ------------------------------- +- module: logstash + metricsets: ["node", "node_stats"] + enabled: true + period: 10s + hosts: ["localhost:9600"] + +#------------------------------ Memcached Module ------------------------------ +- module: memcached + metricsets: ["stats"] + period: 10s + hosts: ["localhost:11211"] + enabled: true + +#------------------------------- MongoDB Module ------------------------------- +- module: mongodb + metricsets: ["dbstats", "status", "collstats", "metrics", "replstatus"] + period: 10s + enabled: true + + # The hosts must be passed as MongoDB URLs in the format: + # [mongodb://][user:pass@]host[:port]. + # The username and password can also be set using the respective configuration + # options. The credentials in the URL take precedence over the username and + # password configuration options. + hosts: ["localhost:27017"] + + # Optional SSL. By default is off. 
+ #ssl.enabled: true + + # Mode of verification of server certificate ('none' or 'full') + #ssl.verification_mode: 'full' + + # List of root certificates for TLS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Username to use when connecting to MongoDB. Empty by default. + #username: user + + # Password to use when connecting to MongoDB. Empty by default. + #password: pass + +#-------------------------------- Munin Module -------------------------------- +- module: munin + metricsets: ["node"] + enabled: true + period: 10s + hosts: ["localhost:4949"] + node.namespace: node + +#-------------------------------- MySQL Module -------------------------------- +- module: mysql + metricsets: + - "status" + # - "galera_status" + period: 10s + + # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" + # The username and password can either be set in the DSN or using the username + # and password config options. Those specified in the DSN take precedence. + hosts: ["root:secret@tcp(127.0.0.1:3306)/"] + + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # By setting raw to true, all raw fields from the status metricset will be added to the event. + #raw: false + +#-------------------------------- Nginx Module -------------------------------- +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 10s + + # Nginx hosts + hosts: ["http://127.0.0.1"] + + # Path to server status. Default server-status + server_status_path: "server-status" + +#------------------------------- PHP_FPM Module ------------------------------- +- module: php_fpm + metricsets: + - pool + #- process + enabled: true + period: 10s + status_path: "/status" + hosts: ["localhost:8080"] + +#------------------------------ PostgreSQL Module ------------------------------ +- module: postgresql + enabled: true + metricsets: + # Stats about every PostgreSQL database + - database + + # Stats about the background writer process's activity + - bgwriter + + # Stats about every PostgreSQL process + - activity + + period: 10s + + # The host must be passed as PostgreSQL URL. Example: + # postgres://localhost:5432?sslmode=disable + # The available parameters are documented here: + # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters + hosts: ["postgres://localhost:5432"] + + # Username to use when connecting to PostgreSQL. Empty by default. + #username: user + + # Password to use when connecting to PostgreSQL. Empty by default. 
+ #password: pass + +#------------------------------ Prometheus Module ------------------------------ +- module: prometheus + metricsets: ["stats"] + enabled: true + period: 10s + hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + +- module: prometheus + metricsets: ["collector"] + enabled: true + period: 10s + hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + + # This can be used for service account based authorization: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + +#------------------------------- RabbitMQ Module ------------------------------- +- module: rabbitmq + metricsets: ["node", "queue", "connection"] + enabled: true + period: 10s + hosts: ["localhost:15672"] + + # Management path prefix, if `management.path_prefix` is set in RabbitMQ + # configuration, it has to be set to the same value. + #management_path_prefix: "" + + #username: guest + #password: guest + +#-------------------------------- Redis Module -------------------------------- +- module: redis + metricsets: ["info", "keyspace"] + enabled: true + period: 10s + + # Redis hosts + hosts: ["127.0.0.1:6379"] + + # Timeout after which time a metricset should return an error + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer then period, as otherwise calls can pile up. + #timeout: 1s + + # Optional fields to be added to each event + #fields: + # datacenter: west + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Filters can be used to reduce the number of fields sent. + #processors: + # - include_fields: + # fields: ["beat", "metricset", "redis.info.stats"] + + # Redis AUTH password. Empty by default. + #password: foobared + +#------------------------------- Traefik Module ------------------------------- +- module: traefik + metricsets: ["health"] + period: 10s + hosts: ["localhost:8080"] + +#-------------------------------- Uwsgi Module -------------------------------- +- module: uwsgi + metricsets: ["status"] + enable: true + period: 10s + hosts: ["tcp://127.0.0.1:9191"] + +#------------------------------- VSphere Module ------------------------------- +- module: vsphere + enabled: true + metricsets: ["datastore", "host", "virtualmachine"] + period: 10s + hosts: ["https://localhost/sdk"] + + username: "user" + password: "password" + # If insecure is true, don't verify the server's certificate chain + insecure: false + # Get custom fields when using virtualmachine metric set. Default false. 
+ # get_custom_fields: false + +#------------------------------- Windows Module ------------------------------- +- module: windows + metricsets: ["perfmon"] + enabled: true + period: 10s + perfmon.ignore_non_existent_counters: true + perfmon.counters: + # - instance_label: processor.name + # instance_name: total + # measurement_label: processor.time.total.pct + # query: '\Processor Information(_Total)\% Processor Time' + +- module: windows + metricsets: ["service"] + enabled: true + period: 60s + +#------------------------------ ZooKeeper Module ------------------------------ +- module: zookeeper + enabled: true + metricsets: ["mntr"] + period: 10s + hosts: ["localhost:2181"] + + + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. 
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
+#
+# event -> filter1 -> event1 -> filter2 -> event2 ...
+#
+# The supported processors include drop_fields, drop_event, include_fields,
+# decode_json_fields, and add_cloud_metadata.
+#
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
+#
+#processors:
+#- include_fields:
+# fields: ["cpu"]
+#- drop_fields:
+# fields: ["cpu.user", "cpu.system"]
+#
+# The following example drops the events that have the HTTP response code 200:
+#
+#processors:
+#- drop_event:
+# when:
+# equals:
+# http.code: 200
+#
+# The following example renames the field a to b:
+#
+#processors:
+#- rename:
+# fields:
+# - from: "a"
+# to: "b"
+#
+# The following example tokenizes the string into fields:
+#
+#processors:
+#- dissect:
+# tokenizer: "%{key1} - %{key2}"
+# field: "message"
+# target_prefix: "dissect"
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, DigitalOcean,
+# Tencent Cloud, and Alibaba Cloud.
+#
+#processors:
+#- add_cloud_metadata: ~
+#
+# The following example enriches each event with the machine's local time zone
+# offset from UTC.
+#
+#processors:
+#- add_locale:
+# format: offset
+#
+# The following example enriches each event with Docker metadata. It matches
+# the given fields to an existing container ID and adds info from that container:
+#
+#processors:
+#- add_docker_metadata:
+# host: "unix:///var/run/docker.sock"
+# match_fields: ["system.process.cgroup.id"]
+# match_pids: ["process.pid", "process.ppid"]
+# match_source: true
+# match_source_index: 4
+# match_short_id: false
+# cleanup_timeout: 60
+# # To connect to Docker over TLS you must specify a client and CA certificate.
+# #ssl:
+# # certificate_authority: "/etc/pki/root/ca.pem"
+# # certificate: "/etc/pki/client/cert.pem"
+# # key: "/etc/pki/client/cert.key"
+#
+# The following example enriches each event with Docker metadata. It matches
+# the container ID from the log path available in the `source` field (by default
+# it expects it to be /var/lib/docker/containers/*/*.log).
+#
+#processors:
+#- add_docker_metadata: ~
+#
+# The following example enriches each event with host metadata.
+#
+#processors:
+#- add_host_metadata:
+# netinfo.enabled: false
+#
+# The following example enriches each event with process metadata using
+# process IDs included in the event.
+#
+#processors:
+#- add_process_metadata:
+# match_pids: ["system.process.ppid"]
+# target: system.process.parent
+#
+# The following example decodes fields containing JSON strings
+# and replaces the strings with valid JSON objects.
+#
+#processors:
+#- decode_json_fields:
+# fields: ["field1", "field2", ...]
+# process_array: false
+# max_depth: 1
+# target: ""
+# overwrite_keys: false
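+#
+# A combined sketch (field names are illustrative): processors run in the
+# order listed, so include_fields trims the event before drop_event
+# evaluates it:
+#
+#processors:
+#- include_fields:
+# fields: ["cpu", "http"]
+#- drop_event:
+# when:
+# equals:
+# http.code: 200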
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs ======================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output -------------------------------
+output.elasticsearch:
+ # Boolean flag to enable or disable the output module.
+ #enabled: true
+
+ # Array of hosts to connect to.
+ # Scheme and port can be left out and will be set to the default (http and 9200)
+ # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+ hosts: ["localhost:9200"]
+
+ # Enable ILM (beta) to use index lifecycle management instead of daily indices.
+ #ilm.enabled: false
+ #ilm.rollover_alias: "metricbeat"
+ #ilm.pattern: "{now/d}-000001"
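+
+ # For illustration (the date below is an assumed example): with
+ # `ilm.enabled: true` and the defaults above, events are written through the
+ # "metricbeat" rollover alias and land in backing indices named like
+ # "metricbeat-2019.01.15-000001", rolling over per the lifecycle policy.
+ #ilm.enabled: true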
+
+ # Set gzip compression level.
+ #compression_level: 0
+
+ # Configure escaping html symbols in strings.
+ #escape_html: true
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "elastic"
+ #password: "changeme"
+
+ # Dictionary of HTTP parameters to pass within the url with index operations.
+ #parameters:
+ #param1: value1
+ #param2: value2
+
+ # Number of workers per Elasticsearch host.
+ #worker: 1
+
+ # Optional index name. The default is "metricbeat" plus date
+ # and generates [metricbeat-]YYYY.MM.DD keys.
+ # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
+ #index: "metricbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
+
+ # Optional ingest node pipeline. By default no pipeline will be used.
+ #pipeline: ""
+
+ # Optional HTTP Path
+ #path: "/elasticsearch"
+
+ # Custom HTTP headers to add to each request
+ #headers:
+ # X-My-Header: Contents of the header
+
+ # Proxy server url
+ #proxy_url: http://proxy:3128
+
+ # The number of times a particular Elasticsearch index operation is attempted. If
+ # the indexing operation doesn't succeed after this many retries, the events are
+ # dropped. The default is 3.
+ #max_retries: 3
+
+ # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+ # The default is 50.
+ #bulk_max_size: 50
+
+ # The number of seconds to wait before trying to reconnect to Elasticsearch
+ # after a network error. After waiting backoff.init seconds, the Beat
+ # tries to reconnect. If the attempt fails, the backoff timer is increased
+ # exponentially up to backoff.max. After a successful connection, the backoff
+ # timer is reset. The default is 1s.
+ #backoff.init: 1s
+
+ # The maximum number of seconds to wait before attempting to connect to
+ # Elasticsearch after a network error. The default is 60s.
+ #backoff.max: 60s
+
+ # Configure http request timeout before failing a request to Elasticsearch.
+ #timeout: 90
+
+ # Use SSL settings for HTTPS.
+ #ssl.enabled: true
+
+ # Configure SSL verification mode. If `none` is configured, all server hosts
+ # and certificates will be accepted. In this mode, SSL based connections are
+ # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+ # `full`.
+ #ssl.verification_mode: full
+
+ # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+ # 1.2 are enabled.
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+ # SSL configuration. By default it is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+ # Optional passphrase for decrypting the Certificate Key.
+ #ssl.key_passphrase: ''
+
+ # Configure cipher suites to be used for SSL connections
+ #ssl.cipher_suites: []
+
+ # Configure curve types for ECDHE based cipher suites
+ #ssl.curve_types: []
+
+ # Configure what types of renegotiation are supported. Valid options are
+ # never, once, and freely. Default is never.
+ #ssl.renegotiation: never
+
+
+#----------------------------- Logstash output ---------------------------------
+#output.logstash:
+ # Boolean flag to enable or disable the output module.
+ #enabled: true
+
+ # The Logstash hosts
+ #hosts: ["localhost:5044"]
+
+ # Number of workers per Logstash host.
+ #worker: 1
+
+ # Set gzip compression level.
+ #compression_level: 3
+
+ # Configure escaping html symbols in strings.
+ #escape_html: true
+
+ # Optional maximum time to live for a connection to Logstash, after which the
+ # connection will be re-established. A value of `0s` (the default) will
+ # disable this feature.
+ #
+ # Not yet supported for async connections (i.e. with the "pipelining" option set)
+ #ttl: 30s
+
+ # Optionally load balance events between the Logstash hosts. Default is false.
+ #loadbalance: false
+
+ # Number of batches to be sent asynchronously to Logstash while processing
+ # new batches.
+ #pipelining: 2
+
+ # If enabled, only a subset of events in a batch of events is transferred per
+ # transaction. The number of events to be sent increases up to `bulk_max_size`
+ # if no error is encountered.
+ #slow_start: false
+
+ # The number of seconds to wait before trying to reconnect to Logstash
+ # after a network error. After waiting backoff.init seconds, the Beat
+ # tries to reconnect. If the attempt fails, the backoff timer is increased
+ # exponentially up to backoff.max.
After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to metricbeat + # in all lowercase. + #index: 'metricbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. 
+ #reachable_only: false
+
+ # Configure alternative event field names used to compute the hash value.
+ # If empty, the `output.kafka.key` setting will be used.
+ # The default value is an empty list.
+ #hash: []
+
+ # Authentication details. Password is required if username is set.
+ #username: ''
+ #password: ''
+
+ # Kafka version metricbeat is assumed to run against. Defaults to "1.0.0".
+ #version: '1.0.0'
+
+ # Configure JSON encoding
+ #codec.json:
+ # Pretty print json event
+ #pretty: false
+
+ # Configure escaping html symbols in strings.
+ #escape_html: true
+
+ # Metadata update configuration. Metadata contains leader information
+ # used to decide which broker to publish to.
+ #metadata:
+ # Max metadata request retry attempts when the cluster is in the middle of a
+ # leader election. Defaults to 3 retries.
+ #retry.max: 3
+
+ # Waiting time between retries during leader elections. Default is 250ms.
+ #retry.backoff: 250ms
+
+ # Refresh metadata interval. Defaults to every 10 minutes.
+ #refresh_frequency: 10m
+
+ # The number of concurrent load-balanced Kafka output workers.
+ #worker: 1
+
+ # The number of times to retry publishing an event after a publishing failure.
+ # After the specified number of retries, the events are typically dropped.
+ # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
+ # all events are published. Set max_retries to a value less than 0 to retry
+ # until all events are published. The default is 3.
+ #max_retries: 3
+
+ # The maximum number of events to bulk in a single Kafka request. The default
+ # is 2048.
+ #bulk_max_size: 2048
+
+ # The number of seconds to wait for responses from the Kafka brokers before
+ # timing out. The default is 30s.
+ #timeout: 30s
+
+ # The maximum duration a broker will wait for the number of required ACKs. The
+ # default is 10s.
+ #broker_timeout: 10s
+
+ # The number of messages buffered for each Kafka broker. The default is 256.
+ #channel_buffer_size: 256
+
+ # The keep-alive period for an active network connection. If 0s, keep-alives
+ # are disabled. The default is 0 seconds.
+ #keep_alive: 0
+
+ # Sets the output compression codec. Must be one of none, snappy, or gzip. The
+ # default is gzip.
+ #compression: gzip
+
+ # Set the compression level. Currently only gzip provides a compression level
+ # between 0 and 9. The default value is chosen by the compression algorithm.
+ #compression_level: 4
+
+ # The maximum permitted size of JSON-encoded messages. Bigger messages will be
+ # dropped. The default value is 1000000 (bytes). This value should be equal to
+ # or less than the broker's message.max.bytes.
+ #max_message_bytes: 1000000
+
+ # The ACK reliability level required from broker. 0=no response, 1=wait for
+ # local commit, -1=wait for all replicas to commit. The default is 1. Note:
+ # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
+ # on error.
+ #required_acks: 1
+
+ # The configurable ClientID used for logging, debugging, and auditing
+ # purposes. The default is "beats".
+ #client_id: beats
+
+ # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+ #ssl.enabled: true
+
+ # Optional SSL configuration options. SSL is off by default.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Configure SSL verification mode. If `none` is configured, all server hosts
+ # and certificates will be accepted.
In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is metricbeat. + #key: metricbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. 
After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/metricbeat" + + # Name of the generated files. The default is `metricbeat` and it generates + # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. + #filename: metricbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every metricbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. 
+ #escape_html: true
+
+#================================= Paths ======================================
+
+# The home path for the metricbeat installation. This is the default base path
+# for all other path settings and for miscellaneous files that come with the
+# distribution (for example, the sample dashboards).
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+#path.home:
+
+# The configuration path for the metricbeat installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the metricbeat installation. This is the default base path
+# for all the files in which metricbeat needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for a metricbeat installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#================================ Keystore ==========================================
+# Location of the Keystore containing the keys and their sensitive values.
+#keystore.path: "${path.config}/beats.keystore"
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: metricbeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
+
+# If true and Kibana is not reachable at the time when dashboards are loaded,
+# the Beat retries the connection to Kibana instead of exiting with an error.
+#setup.dashboards.retry.enabled: false
+
+# Duration interval between Kibana connection retries.
+#setup.dashboards.retry.interval: 1s
+
+# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
+#setup.dashboards.retry.maximum: 0
+
+
+#============================== Template =====================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+#setup.template.enabled: true
+
+# Template name. By default the template name is "metricbeat-%{[beat.version]}"
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.name: "metricbeat-%{[beat.version]}"
+
+# Template pattern. By default the template pattern is "metricbeat-%{[beat.version]}-*" to apply to the default index settings.
+# The first part is the beat name and version, and -* is used to match all daily indices.
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.pattern: "metricbeat-%{[beat.version]}-*"
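+
+# A sketch of keeping the index, template name, and pattern in sync when
+# customizing the index ("customname" is an assumed placeholder):
+#output.elasticsearch.index: "customname-%{[beat.version]}-%{+yyyy.MM.dd}"
+#setup.template.name: "customname-%{[beat.version]}"
+#setup.template.pattern: "customname-%{[beat.version]}-*"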
+
+# Path to fields.yml file to generate the template
+#setup.template.fields: "${path.config}/fields.yml"
+
+# A list of fields to be added to the template and Kibana index pattern. Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+# This setting is experimental.
+#setup.template.append_fields:
+#- name: field_name
+# type: field_type
+
+# Enable json template loading. If this is enabled, the fields.yml is ignored.
+#setup.template.json.enabled: false
+
+# Path to the json template file
+#setup.template.json.path: "${path.config}/template.json"
+
+# Name under which the template is stored in Elasticsearch
+#setup.template.json.name: ""
+
+# Overwrite existing template
+#setup.template.overwrite: false
+
+# Elasticsearch template settings
+setup.template.settings:
+
+ # A dictionary of settings to place into the settings.index dictionary
+ # of the Elasticsearch template. For more details, please check
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+ #index:
+ #number_of_shards: 1
+ #codec: best_compression
+ #number_of_routing_shards: 30
+
+ # A dictionary of settings for the _source field. For more details, please check
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+ #_source:
+ #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+ # Kibana Host
+ # Scheme and port can be left out and will be set to the default (http and 5601)
+ # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+ #host: "localhost:5601"
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "elastic"
+ #password: "changeme"
+
+ # Optional HTTP Path
+ #path: ""
+
+ # Use SSL settings for HTTPS. Default is true.
+ #ssl.enabled: true
+
+ # Configure SSL verification mode. If `none` is configured, all server hosts
+ # and certificates will be accepted. In this mode, SSL based connections are
+ # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+ # `full`.
+ #ssl.verification_mode: full
+
+ # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+ # 1.2 are enabled.
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + + +#================================ Logging ====================================== +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, metricbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # unix epoch. Defaults to disabled. + #interval: 0 + +# Set to true to log messages in json format. +#logging.json: false + + +#============================== Xpack Monitoring ===================================== +# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster. +# This requires xpack monitoring to be enabled in Elasticsearch. +# The reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#xpack.monitoring.enabled: false + +# Uncomment to send the metrics to Elasticsearch. 
Most settings from the
+ # Elasticsearch output are accepted here as well. Any setting that is not set is
+ # automatically inherited from the Elasticsearch output configuration, so if you
+ # have the Elasticsearch output configured, you can simply uncomment the
+ # following line, and leave the rest commented out.
+#xpack.monitoring.elasticsearch:
+
+ # Array of hosts to connect to.
+ # Scheme and port can be left out and will be set to the default (http and 9200)
+ # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+ #hosts: ["localhost:9200"]
+
+ # Set gzip compression level.
+ #compression_level: 0
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "beats_system"
+ #password: "changeme"
+
+ # Dictionary of HTTP parameters to pass within the url with index operations.
+ #parameters:
+ #param1: value1
+ #param2: value2
+
+ # Custom HTTP headers to add to each request
+ #headers:
+ # X-My-Header: Contents of the header
+
+ # Proxy server url
+ #proxy_url: http://proxy:3128
+
+ # The number of times a particular Elasticsearch index operation is attempted. If
+ # the indexing operation doesn't succeed after this many retries, the events are
+ # dropped. The default is 3.
+ #max_retries: 3
+
+ # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+ # The default is 50.
+ #bulk_max_size: 50
+
+ # The number of seconds to wait before trying to reconnect to Elasticsearch
+ # after a network error. After waiting backoff.init seconds, the Beat
+ # tries to reconnect. If the attempt fails, the backoff timer is increased
+ # exponentially up to backoff.max. After a successful connection, the backoff
+ # timer is reset. The default is 1s.
+ #backoff.init: 1s
+
+ # The maximum number of seconds to wait before attempting to connect to
+ # Elasticsearch after a network error. The default is 60s.
+ #backoff.max: 60s
+
+ # Configure http request timeout before failing a request to Elasticsearch.
+ #timeout: 90
+
+ # Use SSL settings for HTTPS.
+ #ssl.enabled: true
+
+ # Configure SSL verification mode. If `none` is configured, all server hosts
+ # and certificates will be accepted. In this mode, SSL based connections are
+ # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+ # `full`.
+ #ssl.verification_mode: full
+
+ # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+ # 1.2 are enabled.
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+ # SSL configuration. By default it is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+ # Optional passphrase for decrypting the Certificate Key.
+ #ssl.key_passphrase: ''
+
+ # Configure cipher suites to be used for SSL connections
+ #ssl.cipher_suites: []
+
+ # Configure curve types for ECDHE based cipher suites
+ #ssl.curve_types: []
+
+ # Configure what types of renegotiation are supported. Valid options are
+ # never, once, and freely. Default is never.
+ #ssl.renegotiation: never
+
+ #metrics.period: 10s
+ #state.period: 1m
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint.
For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+#============================= Process Security ================================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml
new file mode 100644
index 000000000000..aa4765e82205
--- /dev/null
+++ b/x-pack/metricbeat/metricbeat.yml
@@ -0,0 +1,151 @@
+###################### Metricbeat Configuration Example #######################
+
+# This file is an example configuration file highlighting only the most common
+# options. The metricbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+#========================== Modules configuration ============================
+
+metricbeat.config.modules:
+ # Glob pattern for configuration loading
+ path: ${path.config}/modules.d/*.yml
+
+ # Set to true to enable config reloading
+ reload.enabled: false
+
+ # Period on which files under path should be checked for changes
+ #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+ index.number_of_shards: 1
+ index.codec: best_compression
+ #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+# env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+ # Kibana Host
+ # Scheme and port can be left out and will be set to the default (http and 5601)
+ # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+ #host: "localhost:5601"
+
+ # Kibana Space ID
+ # ID of the Kibana Space into which the dashboards should be loaded. By default,
+ # the Default Space will be used.
+ #space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+ # Array of hosts to connect to.
+ hosts: ["localhost:9200"]
+
+ # Enable ILM (beta) to use index lifecycle management instead of daily indices.
+ #ilm.enabled: false
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "elastic"
+ #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+#output.logstash:
+ # The Logstash hosts
+ #hosts: ["localhost:5044"]
+
+ # Optional SSL. By default it is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+ - add_host_metadata: ~
+ - add_cloud_metadata: ~
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# metricbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line.
+#xpack.monitoring.elasticsearch:
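+
+# A minimal sketch (host and credentials below are illustrative assumptions):
+#xpack.monitoring.elasticsearch:
+# hosts: ["monitoring-host:9200"]
+# username: "beats_system"
+# password: "changeme"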