diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6df748b5c53..18214bac842 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -588,6 +588,14 @@ same journal. {pull}18467[18467] `host` metadata fields when processing network data from network tap or mirror port. {pull}19209[19209] +- Add an example to packetbeat.yml of using the `forwarded` tag to disable + `host` metadata fields when processing network data from network tap or mirror + port. {pull}19209[19209] +- Add ECS fields for x509 certs, event categorization, and related IP info. {pull}19167[19167] +- Add 100-continue support {issue}15830[15830] {pull}19349[19349] +- Add initial SIP protocol support {pull}21221[21221] +- Add support for overriding the published index on a per-protocol/flow basis. {pull}22134[22134] +- Change build process for x-pack distribution {pull}21979[21979] *Functionbeat* diff --git a/libbeat/cfgfile/list.go b/libbeat/cfgfile/list.go index 9b62d95f6a9..38193ef5204 100644 --- a/libbeat/cfgfile/list.go +++ b/libbeat/cfgfile/list.go @@ -157,7 +157,9 @@ func (r *RunnerList) Has(hash uint64) bool { // HashConfig hashes a given common.Config func HashConfig(c *common.Config) (uint64, error) { var config map[string]interface{} - c.Unpack(&config) + if err := c.Unpack(&config); err != nil { + return 0, err + } return hashstructure.Hash(config, nil) } diff --git a/packetbeat/_meta/config/beat.reference.yml.tmpl b/packetbeat/_meta/config/beat.reference.yml.tmpl index 722c47102dc..5ccc9bf5a92 100644 --- a/packetbeat/_meta/config/beat.reference.yml.tmpl +++ b/packetbeat/_meta/config/beat.reference.yml.tmpl @@ -63,6 +63,9 @@ packetbeat.flows: # Set to true to publish fields with null values in events. #keep_null: false + # Overrides where flow events are indexed. + #index: my-custom-flow-index + {{header "Transaction protocols"}} packetbeat.protocols: @@ -73,6 +76,9 @@ packetbeat.protocols: # Set to true to publish fields with null values in events. #keep_null: false + # Overrides where this protocol's events are indexed. + #index: my-custom-icmp-index + - type: amqp # Enable AMQP monitoring. Default: true #enabled: true @@ -113,6 +119,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-amqp-index + - type: cassandra #Cassandra port for traffic monitoring. ports: [9042] @@ -143,6 +152,9 @@ packetbeat.protocols: # This option indicates which Operator/Operators will be ignored. #ignored_ops: ["SUPPORTED","OPTIONS"] + # Overrides where this protocol's events are indexed. + #index: my-custom-cassandra-index + - type: dhcpv4 # Configure the DHCP for IPv4 ports. ports: [67, 68] @@ -183,6 +195,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-dhcpv4-index + - type: http # Enable HTTP monitoring. Default: true #enabled: true @@ -257,6 +272,9 @@ packetbeat.protocols: # be trimmed to this size. Default is 10 MB. #max_message_size: 10485760 + # Overrides where this protocol's events are indexed. + #index: my-custom-http-index + - type: memcache # Enable memcache monitoring. Default: true #enabled: true @@ -309,6 +327,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. 
+ #index: my-custom-memcache-index + - type: mysql # Enable mysql monitoring. Default: true #enabled: true @@ -332,6 +353,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-mysql-index + - type: pgsql # Enable pgsql monitoring. Default: true #enabled: true @@ -355,6 +379,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-pgsql-index + - type: redis # Enable redis monitoring. Default: true #enabled: true @@ -387,6 +414,9 @@ packetbeat.protocols: # large enough to allow for pipelining. #queue_max_messages: 20000 + # Overrides where this protocol's events are indexed. + #index: my-custom-redis-index + - type: thrift # Enable thrift monitoring. Default: true #enabled: true @@ -445,6 +475,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-thrift-index + - type: mongodb # Enable mongodb monitoring. Default: true #enabled: true @@ -478,6 +511,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-mongodb-index + - type: nfs # Enable NFS monitoring. Default: true #enabled: true @@ -501,6 +537,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-nfs-index + - type: tls # Enable TLS monitoring. Default: true #enabled: true @@ -531,6 +570,9 @@ packetbeat.protocols: # Set to true to publish fields with null values in events. #keep_null: false + # Overrides where this protocol's events are indexed. + #index: my-custom-tls-index + - type: sip # Configure the ports where to listen for SIP traffic. You can disable the SIP protocol by commenting out the list of ports. ports: [5060] @@ -544,6 +586,9 @@ packetbeat.protocols: # Preserve original contents in event.original keep_original: true + # Overrides where this protocol's events are indexed. 
+ #index: my-custom-sip-index + {{header "Monitored processes"}} # Packetbeat can enrich events with information about the process associated diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index b862bd3ee11..d72a98d4a5f 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -18,46 +18,32 @@ package beater import ( - "errors" "flag" - "fmt" - "sync" "time" - "github.com/tsg/gopacket/layers" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/libbeat/processors" + "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/service" "github.com/elastic/beats/v7/packetbeat/config" - "github.com/elastic/beats/v7/packetbeat/decoder" - "github.com/elastic/beats/v7/packetbeat/flows" - "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" - "github.com/elastic/beats/v7/packetbeat/protos/icmp" - "github.com/elastic/beats/v7/packetbeat/protos/tcp" - "github.com/elastic/beats/v7/packetbeat/protos/udp" - "github.com/elastic/beats/v7/packetbeat/publish" - "github.com/elastic/beats/v7/packetbeat/sniffer" // Add packetbeat default processors _ "github.com/elastic/beats/v7/packetbeat/processor/add_kubernetes_metadata" ) -// Beater object. Contains all objects needed to run the beat -type packetbeat struct { - config config.Config - cmdLineArgs flags - sniff *sniffer.Sniffer - - // publisher/pipeline - pipeline beat.Pipeline - transPub *publish.TransactionPublisher - flows *flows.Flows -} +// this is mainly a limitation to ensure that we never deadlock +// after exiting the main select loop in centrally managed packetbeat +// in order to ensure we don't block on a channel write we make sure +// that the errors channel propagated back from the sniffers has a buffer +// that's equal to the number of sniffers that we can run, that way, if +// exiting and we throw a whole bunch of errors for some reason, each +// sniffer can write out the error even though the main loop has already +// exited with the result of the first error +var maxSniffers = 100 type flags struct { file *string @@ -79,8 +65,8 @@ func init() { } } -func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { - config := config.Config{ +func initialConfig() config.Config { + return config.Config{ Interfaces: config.InterfacesConfig{ File: *cmdLineArgs.file, Loop: *cmdLineArgs.loop, @@ -89,111 +75,31 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { Dumpfile: *cmdLineArgs.dumpfile, }, } - err := rawConfig.Unpack(&config) - if err != nil { - logp.Err("fails to read the beat config: %v, %v", err, config) - return nil, err - } - - pb := &packetbeat{ - config: config, - cmdLineArgs: cmdLineArgs, - } - err = pb.init(b) - if err != nil { - return nil, err - } - - return pb, nil } -// init packetbeat components -func (pb *packetbeat) init(b *beat.Beat) error { - var err error - cfg := &pb.config - // Enable the process watcher only if capturing live traffic - if cfg.Interfaces.File == "" { - err = procs.ProcWatcher.Init(cfg.Procs) - if err != nil { - logp.Critical(err.Error()) - return err - } - } else { - logp.Info("Process watcher disabled when file input is used") - } - - pb.pipeline = b.Publisher - pb.transPub, err = publish.NewTransactionPublisher( - b.Info.Name, - b.Publisher, - pb.config.IgnoreOutgoing, - 
pb.config.Interfaces.File == "", - ) - if err != nil { - return err - } - - logp.Debug("main", "Initializing protocol plugins") - err = protos.Protos.Init(false, pb.transPub, cfg.Protocols, cfg.ProtocolsList) - if err != nil { - return fmt.Errorf("Initializing protocol analyzers failed: %v", err) - } - - if err := pb.setupFlows(); err != nil { - return err - } - - return pb.setupSniffer() -} - -func (pb *packetbeat) setupSniffer() error { - config := &pb.config - - icmp, err := pb.icmpConfig() - if err != nil { - return err - } - - withVlans := config.Interfaces.WithVlans - withICMP := icmp.Enabled() - - filter := config.Interfaces.BpfFilter - if filter == "" && !config.Flows.IsEnabled() { - filter = protos.Protos.BpfFilter(withVlans, withICMP) - } - - pb.sniff, err = sniffer.New(false, filter, pb.createWorker, config.Interfaces) - return err +// Beater object. Contains all objects needed to run the beat +type packetbeat struct { + config *common.Config + factory *processorFactory + done chan struct{} } -func (pb *packetbeat) setupFlows() error { - config := &pb.config - if !config.Flows.IsEnabled() { - return nil - } - - processors, err := processors.New(config.Flows.Processors) - if err != nil { - return err - } - - client, err := pb.pipeline.ConnectWith(beat.ClientConfig{ - Processing: beat.ProcessingConfig{ - EventMetadata: config.Flows.EventMetadata, - Processor: processors, - KeepNull: config.Flows.KeepNull, - }, - }) - if err != nil { - return err +func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { + configurator := config.NewAgentConfig + if !b.Manager.Enabled() { + configurator = initialConfig().FromStatic } - pb.flows, err = flows.NewFlows(client.PublishAll, config.Flows) - if err != nil { - return err + factory := newProcessorFactory(b.Info.Name, make(chan error, maxSniffers), b, configurator) + if err := factory.CheckConfig(rawConfig); err != nil { + return nil, err } - return nil + return &packetbeat{ + config: rawConfig, + factory: factory, + done: make(chan struct{}), + }, nil } func (pb *packetbeat) Run(b *beat.Beat) error { @@ -205,114 +111,56 @@ func (pb *packetbeat) Run(b *beat.Beat) error { } }() - defer pb.transPub.Stop() - - timeout := pb.config.ShutdownTimeout - if timeout > 0 { - defer time.Sleep(timeout) + if !b.Manager.Enabled() { + return pb.runStatic(b, pb.factory) } + return pb.runManaged(b, pb.factory) +} - if pb.flows != nil { - pb.flows.Start() - defer pb.flows.Stop() +func (pb *packetbeat) runStatic(b *beat.Beat, factory *processorFactory) error { + runner, err := factory.Create(b.Publisher, pb.config) + if err != nil { + return err } + runner.Start() + defer runner.Stop() - var wg sync.WaitGroup - errC := make(chan error, 1) + logp.Debug("main", "Waiting for the runner to finish") - // Run the sniffer in background - wg.Add(1) - go func() { - defer wg.Done() - - err := pb.sniff.Run() - if err != nil { - errC <- fmt.Errorf("Sniffer main loop failed: %v", err) - } - }() - - logp.Debug("main", "Waiting for the sniffer to finish") - wg.Wait() select { - default: - case err := <-errC: + case <-pb.done: + case err := <-factory.err: + close(pb.done) return err } - return nil } -// Called by the Beat stop function -func (pb *packetbeat) Stop() { - logp.Info("Packetbeat send stop signal") - pb.sniff.Stop() -} - -func (pb *packetbeat) createWorker(dl layers.LinkType) (sniffer.Worker, error) { - var icmp4 icmp.ICMPv4Processor - var icmp6 icmp.ICMPv6Processor - cfg, err := pb.icmpConfig() - if err != nil { - return nil, err - } - if cfg.Enabled() 
{ - reporter, err := pb.transPub.CreateReporter(cfg) - if err != nil { - return nil, err - } - - icmp, err := icmp.New(false, reporter, cfg) - if err != nil { - return nil, err +func (pb *packetbeat) runManaged(b *beat.Beat, factory *processorFactory) error { + runner := newReloader(management.DebugK, factory, b.Publisher) + reload.Register.MustRegisterList("inputs", runner) + defer runner.Stop() + + logp.Debug("main", "Waiting for the runner to finish") + + for { + select { + case <-pb.done: + return nil + case err := <-factory.err: + // when we're managed we don't want + // to stop if the sniffer(s) exited without an error + // this would happen during a configuration reload + if err != nil { + close(pb.done) + return err + } } - - icmp4 = icmp - icmp6 = icmp - } - - tcp, err := tcp.NewTCP(&protos.Protos) - if err != nil { - return nil, err } - - udp, err := udp.NewUDP(&protos.Protos) - if err != nil { - return nil, err - } - - worker, err := decoder.New(pb.flows, dl, icmp4, icmp6, tcp, udp) - if err != nil { - return nil, err - } - - return worker, nil } -func (pb *packetbeat) icmpConfig() (*common.Config, error) { - var icmp *common.Config - if pb.config.Protocols["icmp"].Enabled() { - icmp = pb.config.Protocols["icmp"] - } - - for _, cfg := range pb.config.ProtocolsList { - info := struct { - Type string `config:"type" validate:"required"` - }{} - - if err := cfg.Unpack(&info); err != nil { - return nil, err - } - - if info.Type != "icmp" { - continue - } - - if icmp != nil { - return nil, errors.New("More then one icmp configurations found") - } - - icmp = cfg - } - - return icmp, nil +// Called by the Beat stop function +func (pb *packetbeat) Stop() { + logp.Info("Packetbeat send stop signal") + close(pb.done) } diff --git a/packetbeat/beater/processor.go b/packetbeat/beater/processor.go new file mode 100644 index 00000000000..d6aafbb1a7f --- /dev/null +++ b/packetbeat/beater/processor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
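Note on the `maxSniffers` buffer introduced above: sizing the error channel to the maximum number of sniffers is what keeps a late-failing sniffer from blocking once the main select loop has already returned on the first error. A minimal, self-contained sketch of that pattern (illustrative names only, not packetbeat code):

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxWorkers = 100 // analogous to maxSniffers: capacity == number of possible writers

	// Buffered so that every worker can report an error even after the
	// consumer below has already returned on the first failure.
	errs := make(chan error, maxWorkers)

	for i := 0; i < 3; i++ {
		go func(id int) {
			time.Sleep(time.Duration(id*10) * time.Millisecond)
			errs <- fmt.Errorf("worker %d failed", id) // never blocks: the buffer has room
		}(i)
	}

	// The consumer exits on the first error; the remaining workers can still
	// complete their sends into the buffer instead of blocking forever.
	fmt.Println("first error:", <-errs)
}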
+ +package beater + +import ( + "fmt" + "sync" + "time" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" + + "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/flows" + "github.com/elastic/beats/v7/packetbeat/procs" + "github.com/elastic/beats/v7/packetbeat/protos" + "github.com/elastic/beats/v7/packetbeat/publish" + "github.com/elastic/beats/v7/packetbeat/sniffer" +) + +type processor struct { + wg sync.WaitGroup + publisher *publish.TransactionPublisher + flows *flows.Flows + sniffer *sniffer.Sniffer + shutdownTimeout time.Duration + err chan error +} + +func newProcessor(shutdownTimeout time.Duration, publisher *publish.TransactionPublisher, flows *flows.Flows, sniffer *sniffer.Sniffer, err chan error) *processor { + return &processor{ + publisher: publisher, + flows: flows, + sniffer: sniffer, + err: err, + shutdownTimeout: shutdownTimeout, + } +} + +func (p *processor) String() string { + return "packetbeat.processor" +} + +func (p *processor) Start() { + if p.flows != nil { + p.flows.Start() + } + p.wg.Add(1) + go func() { + defer p.wg.Done() + + err := p.sniffer.Run() + if err != nil { + p.err <- fmt.Errorf("sniffer loop failed: %v", err) + return + } + p.err <- nil + }() +} + +func (p *processor) Stop() { + p.sniffer.Stop() + if p.flows != nil { + p.flows.Stop() + } + p.wg.Wait() + // wait for shutdownTimeout to let the publisher flush + // whatever pending events + if p.shutdownTimeout > 0 { + time.Sleep(p.shutdownTimeout) + } + p.publisher.Stop() +} + +type processorFactory struct { + name string + err chan error + beat *beat.Beat + configurator func(*common.Config) (config.Config, error) +} + +func newProcessorFactory(name string, err chan error, beat *beat.Beat, configurator func(*common.Config) (config.Config, error)) *processorFactory { + return &processorFactory{ + name: name, + err: err, + beat: beat, + configurator: configurator, + } +} + +func (p *processorFactory) Create(pipeline beat.PipelineConnector, cfg *common.Config) (cfgfile.Runner, error) { + config, err := p.configurator(cfg) + if err != nil { + logp.Err("Failed to read the beat config: %v, %v", err, config) + return nil, err + } + + publisher, err := publish.NewTransactionPublisher( + p.beat.Info.Name, + p.beat.Publisher, + config.IgnoreOutgoing, + config.Interfaces.File == "", + ) + if err != nil { + return nil, err + } + + watcher := procs.ProcessesWatcher{} + // Enable the process watcher only if capturing live traffic + if config.Interfaces.File == "" { + err = watcher.Init(config.Procs) + if err != nil { + logp.Critical(err.Error()) + return nil, err + } + } else { + logp.Info("Process watcher disabled when file input is used") + } + + logp.Debug("main", "Initializing protocol plugins") + protocols := protos.NewProtocols() + err = protocols.Init(false, publisher, watcher, config.Protocols, config.ProtocolsList) + if err != nil { + return nil, fmt.Errorf("Initializing protocol analyzers failed: %v", err) + } + flows, err := setupFlows(pipeline, watcher, config) + if err != nil { + return nil, err + } + sniffer, err := setupSniffer(config, protocols, workerFactory(publisher, protocols, watcher, flows, config)) + if err != nil { + return nil, err + } + + return newProcessor(config.ShutdownTimeout, publisher, flows, sniffer, p.err), nil +} + +func (p *processorFactory) 
CheckConfig(config *common.Config) error { + runner, err := p.Create(pipeline.NewNilPipeline(), config) + if err != nil { + return err + } + runner.Stop() + return nil +} diff --git a/packetbeat/beater/reloader.go b/packetbeat/beater/reloader.go new file mode 100644 index 00000000000..c6925e2fa95 --- /dev/null +++ b/packetbeat/beater/reloader.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package beater + +import ( + "fmt" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" + "github.com/elastic/beats/v7/libbeat/common/reload" +) + +type reloader struct { + *cfgfile.RunnerList +} + +func newReloader(name string, factory *processorFactory, pipeline beat.PipelineConnector) *reloader { + return &reloader{ + RunnerList: cfgfile.NewRunnerList(name, factory, pipeline), + } +} + +func (r *reloader) Reload(configs []*reload.ConfigWithMeta) error { + if len(configs) > maxSniffers { + return fmt.Errorf("only %d inputs are currently supported", maxSniffers) + } + return r.RunnerList.Reload(configs) +} diff --git a/packetbeat/beater/setup.go b/packetbeat/beater/setup.go new file mode 100644 index 00000000000..f8ed8b0aea6 --- /dev/null +++ b/packetbeat/beater/setup.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
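`CheckConfig` above validates a candidate configuration by building a real runner against `pipeline.NewNilPipeline()` and immediately stopping it, so validation exercises the same code path as an actual start. A self-contained sketch of that idea with hypothetical `Runner`/`Factory` types (not the libbeat `cfgfile` interfaces themselves):

package main

import (
	"errors"
	"fmt"
)

// Runner and Factory mimic the contract used above: a factory turns a raw
// config into something that can be started and stopped.
type Runner interface {
	Start()
	Stop()
}

type Factory interface {
	Create(output string, cfg map[string]interface{}) (Runner, error)
}

// checkConfig builds a runner against a throwaway output and tears it down,
// so any configuration error surfaces exactly as it would at real startup.
func checkConfig(f Factory, cfg map[string]interface{}) error {
	runner, err := f.Create("discard", cfg)
	if err != nil {
		return err
	}
	runner.Stop()
	return nil
}

type noopRunner struct{}

func (noopRunner) Start() {}
func (noopRunner) Stop()  {}

type exampleFactory struct{}

func (exampleFactory) Create(_ string, cfg map[string]interface{}) (Runner, error) {
	if _, ok := cfg["type"]; !ok {
		return nil, errors.New("missing required 'type' setting")
	}
	return noopRunner{}, nil
}

func main() {
	fmt.Println(checkConfig(exampleFactory{}, map[string]interface{}{"type": "http"})) // <nil>
	fmt.Println(checkConfig(exampleFactory{}, map[string]interface{}{}))               // missing required 'type' setting
}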
+ +package beater + +import ( + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/processors" + "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/flows" + "github.com/elastic/beats/v7/packetbeat/procs" + "github.com/elastic/beats/v7/packetbeat/protos" + "github.com/elastic/beats/v7/packetbeat/sniffer" +) + +func setupSniffer(cfg config.Config, protocols *protos.ProtocolsStruct, workerFactory sniffer.WorkerFactory) (*sniffer.Sniffer, error) { + icmp, err := cfg.ICMP() + if err != nil { + return nil, err + } + + filter := cfg.Interfaces.BpfFilter + if filter == "" && !cfg.Flows.IsEnabled() { + filter = protocols.BpfFilter(cfg.Interfaces.WithVlans, icmp.Enabled()) + } + + return sniffer.New(false, filter, workerFactory, cfg.Interfaces) +} + +func setupFlows(pipeline beat.Pipeline, watcher procs.ProcessesWatcher, cfg config.Config) (*flows.Flows, error) { + if !cfg.Flows.IsEnabled() { + return nil, nil + } + + processors, err := processors.New(cfg.Flows.Processors) + if err != nil { + return nil, err + } + + clientConfig := beat.ClientConfig{ + Processing: beat.ProcessingConfig{ + EventMetadata: cfg.Flows.EventMetadata, + Processor: processors, + KeepNull: cfg.Flows.KeepNull, + }, + } + if cfg.Flows.Index != "" { + clientConfig.Processing.Meta = common.MapStr{"index": cfg.Flows.Index} + } + + client, err := pipeline.ConnectWith(clientConfig) + if err != nil { + return nil, err + } + + return flows.NewFlows(client.PublishAll, watcher, cfg.Flows) +} diff --git a/packetbeat/beater/worker.go b/packetbeat/beater/worker.go new file mode 100644 index 00000000000..5dd6a514454 --- /dev/null +++ b/packetbeat/beater/worker.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
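In `setupFlows` above, the `index` metadata is only attached when `cfg.Flows.Index` is non-empty, so the default index stays in effect unless the new option is set. A small standalone sketch of that conditional, with a plain map standing in for `common.MapStr` and `beat.ClientConfig.Processing.Meta`:

package main

import "fmt"

// flowsConfig mirrors the relevant slice of packetbeat's Flows config:
// an optional index override that is empty by default.
type flowsConfig struct {
	Index string
}

// clientMeta stands in for the per-client metadata attached to the pipeline
// connection: it is only populated when an override is configured.
func clientMeta(cfg flowsConfig) map[string]interface{} {
	if cfg.Index == "" {
		return nil // no override: events keep the beat's default index
	}
	return map[string]interface{}{"index": cfg.Index}
}

func main() {
	fmt.Println(clientMeta(flowsConfig{}))                              // map[] (nil, no override)
	fmt.Println(clientMeta(flowsConfig{Index: "my-custom-flow-index"})) // map[index:my-custom-flow-index]
}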
+ +package beater + +import ( + "github.com/tsg/gopacket/layers" + + "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/decoder" + "github.com/elastic/beats/v7/packetbeat/flows" + "github.com/elastic/beats/v7/packetbeat/procs" + "github.com/elastic/beats/v7/packetbeat/protos" + "github.com/elastic/beats/v7/packetbeat/protos/icmp" + "github.com/elastic/beats/v7/packetbeat/protos/tcp" + "github.com/elastic/beats/v7/packetbeat/protos/udp" + "github.com/elastic/beats/v7/packetbeat/publish" + "github.com/elastic/beats/v7/packetbeat/sniffer" +) + +func workerFactory(publisher *publish.TransactionPublisher, protocols *protos.ProtocolsStruct, watcher procs.ProcessesWatcher, flows *flows.Flows, cfg config.Config) func(dl layers.LinkType) (sniffer.Worker, error) { + return func(dl layers.LinkType) (sniffer.Worker, error) { + var icmp4 icmp.ICMPv4Processor + var icmp6 icmp.ICMPv6Processor + config, err := cfg.ICMP() + if err != nil { + return nil, err + } + if config.Enabled() { + reporter, err := publisher.CreateReporter(config) + if err != nil { + return nil, err + } + + icmp, err := icmp.New(false, reporter, watcher, config) + if err != nil { + return nil, err + } + + icmp4 = icmp + icmp6 = icmp + } + + tcp, err := tcp.NewTCP(protocols) + if err != nil { + return nil, err + } + + udp, err := udp.NewUDP(protocols) + if err != nil { + return nil, err + } + + worker, err := decoder.New(flows, dl, icmp4, icmp6, tcp, udp) + if err != nil { + return nil, err + } + + return worker, nil + } +} diff --git a/packetbeat/config/agent.go b/packetbeat/config/agent.go new file mode 100644 index 00000000000..30f64630d44 --- /dev/null +++ b/packetbeat/config/agent.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
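`workerFactory` above returns a closure so the sniffer can build a worker per link type while all workers share one publisher, protocol registry, process watcher, and flow table. A generic sketch of that factory-closure shape (types here are illustrative, not the packetbeat ones):

package main

import "fmt"

type linkType string

type worker struct {
	link   linkType
	shared *string // stands in for the shared publisher/protocols/watcher/flows
}

// newWorkerFactory captures the shared dependencies once and returns a
// constructor that can be invoked per interface / link type.
func newWorkerFactory(shared *string) func(linkType) (*worker, error) {
	return func(dl linkType) (*worker, error) {
		if dl == "" {
			return nil, fmt.Errorf("unknown link type")
		}
		return &worker{link: dl, shared: shared}, nil
	}
}

func main() {
	deps := "publisher+protocols+watcher+flows"
	factory := newWorkerFactory(&deps)

	for _, dl := range []linkType{"ethernet", "linux_sll"} {
		w, err := factory(dl)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Println(w.link, "->", *w.shared)
	}
}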
+ +package config + +import ( + "fmt" + "runtime" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/go-ucfg" +) + +type datastream struct { + Namespace string `config:"namespace"` + Dataset string `config:"dataset"` + Type string `config:"type"` +} + +type agentInput struct { + Type string `config:"type"` + Datastream datastream `config:"data_stream"` + Processors []common.MapStr `config:"processors"` + Streams []map[string]interface{} `config:"streams"` +} + +var osDefaultDevices = map[string]string{ + "darwin": "en0", + "linux": "any", +} + +func defaultDevice() string { + if device, found := osDefaultDevices[runtime.GOOS]; found { + return device + } + return "0" +} + +func (i agentInput) addProcessorsAndIndex(cfg *common.Config) (*common.Config, error) { + namespace := i.Datastream.Namespace + if namespace == "" { + namespace = "default" + } + datastreamConfig := struct { + Datastream datastream `config:"data_stream"` + }{} + if err := cfg.Unpack(&datastreamConfig); err != nil { + return nil, err + } + mergeConfig, err := common.NewConfigFrom(common.MapStr{ + "index": datastreamConfig.Datastream.Type + "-" + datastreamConfig.Datastream.Dataset + "-" + namespace, + "processors": append([]common.MapStr{ + common.MapStr{ + "add_fields": common.MapStr{ + "target": "data_stream", + "fields": common.MapStr{ + "type": datastreamConfig.Datastream.Type, + "dataset": datastreamConfig.Datastream.Dataset, + "namespace": namespace, + }, + }, + }, + common.MapStr{ + "add_fields": common.MapStr{ + "target": "event", + "fields": common.MapStr{ + "dataset": datastreamConfig.Datastream.Dataset, + }, + }, + }, + }, i.Processors...), + }) + if err != nil { + return nil, err + } + if err := cfg.MergeWithOpts(mergeConfig, ucfg.FieldAppendValues("processors")); err != nil { + return nil, err + } + return cfg, nil +} + +// NewAgentConfig allows the packetbeat configuration to understand +// agent semantics +func NewAgentConfig(cfg *common.Config) (Config, error) { + logp.Debug("agent", "Normalizing agent configuration") + var input agentInput + config := Config{ + Interfaces: InterfacesConfig{ + // TODO: make this configurable rather than just using the default device + Device: defaultDevice(), + }, + } + if err := cfg.Unpack(&input); err != nil { + return config, err + } + + logp.Debug("agent", fmt.Sprintf("Found %d inputs", len(input.Streams))) + for _, stream := range input.Streams { + if rawStreamType, ok := stream["type"]; ok { + streamType, ok := rawStreamType.(string) + if !ok { + return config, fmt.Errorf("invalid input type of: '%T'", rawStreamType) + } + logp.Debug("agent", fmt.Sprintf("Found agent configuration for %v", streamType)) + cfg, err := common.NewConfigFrom(stream) + if err != nil { + return config, err + } + cfg, err = input.addProcessorsAndIndex(cfg) + if err != nil { + return config, err + } + switch streamType { + case "flow": + if err := cfg.Unpack(&config.Flows); err != nil { + return config, err + } + default: + config.ProtocolsList = append(config.ProtocolsList, cfg) + } + } + } + return config, nil +} diff --git a/packetbeat/config/agent_test.go b/packetbeat/config/agent_test.go new file mode 100644 index 00000000000..2612423e0cb --- /dev/null +++ b/packetbeat/config/agent_test.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestAgentInputNormalization(t *testing.T) { + cfg, err := common.NewConfigFrom(` +type: packet +data_stream: + namespace: default +processors: + - add_fields: + target: 'elastic_agent' + fields: + id: agent-id + version: 8.0.0 + snapshot: false +streams: + - type: flow + timeout: 10s + period: 10s + keep_null: false + data_stream: + dataset: packet.flow + type: logs + - type: icmp + data_stream: + dataset: packet.icmp + type: logs +`) + require.NoError(t, err) + config, err := NewAgentConfig(cfg) + require.NoError(t, err) + + require.Equal(t, config.Flows.Timeout, "10s") + require.Equal(t, config.Flows.Index, "logs-packet.flow-default") + require.Len(t, config.ProtocolsList, 1) + + var protocol map[string]interface{} + require.NoError(t, config.ProtocolsList[0].Unpack(&protocol)) + require.Len(t, protocol["processors"].([]interface{}), 3) +} diff --git a/packetbeat/config/config.go b/packetbeat/config/config.go index 893e4828ab8..9d3818d4aa4 100644 --- a/packetbeat/config/config.go +++ b/packetbeat/config/config.go @@ -18,6 +18,7 @@ package config import ( + "errors" "time" "github.com/elastic/beats/v7/libbeat/common" @@ -35,6 +36,44 @@ type Config struct { ShutdownTimeout time.Duration `config:"shutdown_timeout"` } +// FromStatic initializes a configuration given a common.Config +func (c Config) FromStatic(cfg *common.Config) (Config, error) { + err := cfg.Unpack(&c) + if err != nil { + return c, err + } + return c, nil +} + +// ICMP returns the ICMP configuration +func (c Config) ICMP() (*common.Config, error) { + var icmp *common.Config + if c.Protocols["icmp"].Enabled() { + icmp = c.Protocols["icmp"] + } + + for _, cfg := range c.ProtocolsList { + info := struct { + Type string `config:"type" validate:"required"` + }{} + + if err := cfg.Unpack(&info); err != nil { + return nil, err + } + + if info.Type != "icmp" { + continue + } + + if icmp != nil { + return nil, errors.New("more than one icmp configuration found") + } + + icmp = cfg + } + return icmp, nil +} + type InterfacesConfig struct { Device string `config:"device"` Type string `config:"type"` @@ -57,6 +96,8 @@ type Flows struct { EventMetadata common.EventMetadata `config:",inline"` Processors processors.PluginConfig `config:"processors"` KeepNull bool `config:"keep_null"` + // Index is used to overwrite the index where flows are published + Index string `config:"index"` } type ProtocolCommon struct { diff --git a/packetbeat/docs/packetbeat-options.asciidoc b/packetbeat/docs/packetbeat-options.asciidoc index 32d9c473054..d3777c594c4 100644 --- a/packetbeat/docs/packetbeat-options.asciidoc +++ b/packetbeat/docs/packetbeat-options.asciidoc @@ -431,6 +431,11 @@ processors in your config. If this option is set to true, fields with `null` values will be published in the output document. 
By default, `keep_null` is set to `false`. +[float] +==== `index` + +Overrides the index that flow events are published to. + [[configuration-protocols]] == Configure which transaction protocols to monitor @@ -554,6 +559,12 @@ custom fields as top-level fields, set the `fields_under_root` option to true. If a duplicate field is declared in the general configuration, then its value will be overwritten by the value declared here. +[float] +[[packetbeat-configuration-index]] +==== `index` + +Overrides the index that events for the given protocol are published to. + [source,yaml] -------------------------------------------------------------------------------- packetbeat.protocols: diff --git a/packetbeat/flows/flows.go b/packetbeat/flows/flows.go index fb292b92bf2..d58a2c45987 100644 --- a/packetbeat/flows/flows.go +++ b/packetbeat/flows/flows.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/procs" ) type Flows struct { @@ -41,7 +42,7 @@ const ( defaultPeriod = 10 * time.Second ) -func NewFlows(pub Reporter, config *config.Flows) (*Flows, error) { +func NewFlows(pub Reporter, watcher procs.ProcessesWatcher, config *config.Flows) (*Flows, error) { duration := func(s string, d time.Duration) (time.Duration, error) { if s == "" { return d, nil @@ -67,7 +68,7 @@ func NewFlows(pub Reporter, config *config.Flows) (*Flows, error) { counter := &counterReg{} - worker, err := newFlowsWorker(pub, table, counter, timeout, period) + worker, err := newFlowsWorker(pub, watcher, table, counter, timeout, period) if err != nil { logp.Err("failed to configure flows processing intervals: %v", err) return nil, err diff --git a/packetbeat/flows/flows_test.go b/packetbeat/flows/flows_test.go index b1c3a5b83b9..e56b3777374 100644 --- a/packetbeat/flows/flows_test.go +++ b/packetbeat/flows/flows_test.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/procs" ) type flowsChan struct { @@ -50,7 +51,7 @@ func TestFlowsCounting(t *testing.T) { port1 := []byte{0, 1} port2 := []byte{0, 2} - module, err := NewFlows(nil, &config.Flows{}) + module, err := NewFlows(nil, procs.ProcessesWatcher{}, &config.Flows{}) assert.NoError(t, err) uint1, err := module.NewUint("uint1") diff --git a/packetbeat/flows/worker.go b/packetbeat/flows/worker.go index 49548db9865..eebc3c260b5 100644 --- a/packetbeat/flows/worker.go +++ b/packetbeat/flows/worker.go @@ -32,6 +32,7 @@ import ( type flowsProcessor struct { spool spool + watcher procs.ProcessesWatcher table *flowMetaTable counters *counterReg timeout time.Duration @@ -44,6 +45,7 @@ var ( func newFlowsWorker( pub Reporter, + watcher procs.ProcessesWatcher, table *flowMetaTable, counters *counterReg, timeout, period time.Duration, @@ -84,6 +86,7 @@ func newFlowsWorker( defaultBatchSize := 1024 processor := &flowsProcessor{ table: table, + watcher: watcher, counters: counters, timeout: timeout, } @@ -194,13 +197,14 @@ func (fw *flowsProcessor) report( isOver bool, intNames, uintNames, floatNames []string, ) { - event := createEvent(ts, flow, isOver, intNames, uintNames, floatNames) + event := createEvent(fw.watcher, ts, flow, isOver, intNames, uintNames, floatNames) debugf("add event: %v", event) fw.spool.publish(event) } func createEvent( + watcher procs.ProcessesWatcher, ts 
time.Time, f *biFlow, isOver bool, intNames, uintNames, floatNames []string, @@ -386,7 +390,7 @@ func createEvent( // Set process information if it's available if tuple.IPLength != 0 && tuple.SrcPort != 0 { - if proc := procs.ProcWatcher.FindProcessesTuple(&tuple, proto); proc != nil { + if proc := watcher.FindProcessesTuple(&tuple, proto); proc != nil { if proc.Src.PID > 0 { p := common.MapStr{ "pid": proc.Src.PID, diff --git a/packetbeat/flows/worker_test.go b/packetbeat/flows/worker_test.go index 4346d54aaf6..2827102c38c 100644 --- a/packetbeat/flows/worker_test.go +++ b/packetbeat/flows/worker_test.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" ) var ( @@ -66,7 +67,7 @@ func TestCreateEvent(t *testing.T) { } bif.stats[0] = &flowStats{uintFlags: []uint8{1, 1}, uints: []uint64{10, 1}} bif.stats[1] = &flowStats{uintFlags: []uint8{1, 1}, uints: []uint64{460, 2}} - event := createEvent(time.Now(), bif, true, nil, []string{"bytes", "packets"}, nil) + event := createEvent(procs.ProcessesWatcher{}, time.Now(), bif, true, nil, []string{"bytes", "packets"}, nil) // Validate the contents of the event. validate := lookslike.MustCompile(map[string]interface{}{ diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 27b76e83790..07c8e36704a 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -63,6 +63,9 @@ packetbeat.flows: # Set to true to publish fields with null values in events. #keep_null: false + # Overrides where flow events are indexed. + #index: my-custom-flow-index + # =========================== Transaction protocols ============================ packetbeat.protocols: @@ -73,6 +76,9 @@ packetbeat.protocols: # Set to true to publish fields with null values in events. #keep_null: false + # Overrides where this protocol's events are indexed. + #index: my-custom-icmp-index + - type: amqp # Enable AMQP monitoring. Default: true #enabled: true @@ -113,6 +119,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-amqp-index + - type: cassandra #Cassandra port for traffic monitoring. ports: [9042] @@ -143,6 +152,9 @@ packetbeat.protocols: # This option indicates which Operator/Operators will be ignored. #ignored_ops: ["SUPPORTED","OPTIONS"] + # Overrides where this protocol's events are indexed. + #index: my-custom-cassandra-index + - type: dhcpv4 # Configure the DHCP for IPv4 ports. ports: [67, 68] @@ -183,6 +195,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-dhcpv4-index + - type: http # Enable HTTP monitoring. Default: true #enabled: true @@ -257,6 +272,9 @@ packetbeat.protocols: # be trimmed to this size. Default is 10 MB. #max_message_size: 10485760 + # Overrides where this protocol's events are indexed. + #index: my-custom-http-index + - type: memcache # Enable memcache monitoring. Default: true #enabled: true @@ -309,6 +327,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-memcache-index + - type: mysql # Enable mysql monitoring. 
Default: true #enabled: true @@ -332,6 +353,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-mysql-index + - type: pgsql # Enable pgsql monitoring. Default: true #enabled: true @@ -355,6 +379,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-pgsql-index + - type: redis # Enable redis monitoring. Default: true #enabled: true @@ -387,6 +414,9 @@ packetbeat.protocols: # large enough to allow for pipelining. #queue_max_messages: 20000 + # Overrides where this protocol's events are indexed. + #index: my-custom-redis-index + - type: thrift # Enable thrift monitoring. Default: true #enabled: true @@ -445,6 +475,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-thrift-index + - type: mongodb # Enable mongodb monitoring. Default: true #enabled: true @@ -478,6 +511,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-mongodb-index + - type: nfs # Enable NFS monitoring. Default: true #enabled: true @@ -501,6 +537,9 @@ packetbeat.protocols: # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-nfs-index + - type: tls # Enable TLS monitoring. Default: true #enabled: true @@ -531,6 +570,9 @@ packetbeat.protocols: # Set to true to publish fields with null values in events. #keep_null: false + # Overrides where this protocol's events are indexed. + #index: my-custom-tls-index + - type: sip # Configure the ports where to listen for SIP traffic. You can disable the SIP protocol by commenting out the list of ports. ports: [5060] @@ -544,6 +586,9 @@ packetbeat.protocols: # Preserve original contents in event.original keep_original: true + # Overrides where this protocol's events are indexed. 
+ #index: my-custom-sip-index + # ============================ Monitored processes ============================= # Packetbeat can enrich events with information about the process associated diff --git a/packetbeat/procs/procs.go b/packetbeat/procs/procs.go index dfead47d93c..bf3daab9ff2 100644 --- a/packetbeat/procs/procs.go +++ b/packetbeat/procs/procs.go @@ -83,8 +83,6 @@ type ProcessesWatcher struct { impl processWatcherImpl } -var ProcWatcher ProcessesWatcher - func (proc *ProcessesWatcher) Init(config ProcsConfig) error { return proc.initWithImpl(config, proc) } diff --git a/packetbeat/protos/amqp/amqp.go b/packetbeat/protos/amqp/amqp.go index c361c3e7fe6..1113d4ee6df 100644 --- a/packetbeat/protos/amqp/amqp.go +++ b/packetbeat/protos/amqp/amqp.go @@ -27,6 +27,7 @@ import ( "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/beats/v7/packetbeat/pb" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" ) @@ -47,6 +48,7 @@ type amqpPlugin struct { transactions *common.Cache transactionTimeout time.Duration results protos.Reporter + watcher procs.ProcessesWatcher //map containing functions associated with different method numbers methodMap map[codeClass]map[codeMethod]amqpMethod @@ -64,6 +66,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &amqpPlugin{} @@ -74,13 +77,13 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (amqp *amqpPlugin) init(results protos.Reporter, config *amqpConfig) error { +func (amqp *amqpPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *amqpConfig) error { amqp.initMethodMap() amqp.setFromConfig(config) @@ -92,6 +95,7 @@ func (amqp *amqpPlugin) init(results protos.Reporter, config *amqpConfig) error protos.DefaultTransactionHashSize) amqp.transactions.StartJanitor(amqp.transactionTimeout) amqp.results = results + amqp.watcher = watcher return nil } diff --git a/packetbeat/protos/amqp/amqp_parser.go b/packetbeat/protos/amqp/amqp_parser.go index eeaaf2a9464..6ab15ec8159 100644 --- a/packetbeat/protos/amqp/amqp_parser.go +++ b/packetbeat/protos/amqp/amqp_parser.go @@ -23,7 +23,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/packetbeat/procs" ) func (amqp *amqpPlugin) amqpMessageParser(s *amqpStream) (ok bool, complete bool) { @@ -336,7 +335,7 @@ func (amqp *amqpPlugin) handleAmqp(m *amqpMessage, tcptuple *common.TCPTuple, di debugf("A message is ready to be handled") m.tcpTuple = *tcptuple m.direction = dir - m.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + m.cmdlineTuple = amqp.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) if m.method == "basic.publish" { amqp.handlePublishing(m) diff --git a/packetbeat/protos/amqp/amqp_test.go b/packetbeat/protos/amqp/amqp_test.go index d9f583cdc22..d67667888ef 100644 --- a/packetbeat/protos/amqp/amqp_test.go +++ b/packetbeat/protos/amqp/amqp_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/publish" ) @@ -45,7 +46,7 @@ func amqpModForTests() 
(*eventStore, *amqpPlugin) { var amqp amqpPlugin results := &eventStore{} config := defaultConfig - amqp.init(results.publish, &config) + amqp.init(results.publish, procs.ProcessesWatcher{}, &config) return results, &amqp } diff --git a/packetbeat/protos/cassandra/cassandra.go b/packetbeat/protos/cassandra/cassandra.go index ed0f48e91a4..7d3001a5159 100644 --- a/packetbeat/protos/cassandra/cassandra.go +++ b/packetbeat/protos/cassandra/cassandra.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" @@ -34,6 +35,7 @@ type cassandra struct { ports protos.PortsConfig parserConfig parserConfig transConfig transactionConfig + watcher procs.ProcessesWatcher pub transPub } @@ -60,6 +62,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &cassandra{} @@ -70,17 +73,18 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (cassandra *cassandra) init(results protos.Reporter, config *cassandraConfig) error { +func (cassandra *cassandra) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *cassandraConfig) error { if err := cassandra.setFromConfig(config); err != nil { return err } cassandra.pub.results = results + cassandra.watcher = watcher return nil } @@ -193,7 +197,7 @@ func (cassandra *cassandra) ensureConnection(private protos.ProtocolData) *conne conn := getConnection(private) if conn == nil { conn = &connection{} - conn.trans.init(&cassandra.transConfig, cassandra.pub.onTransaction) + conn.trans.init(&cassandra.transConfig, cassandra.watcher, cassandra.pub.onTransaction) } return conn } diff --git a/packetbeat/protos/cassandra/trans.go b/packetbeat/protos/cassandra/trans.go index 62d36ee3695..9b055d22c88 100644 --- a/packetbeat/protos/cassandra/trans.go +++ b/packetbeat/protos/cassandra/trans.go @@ -33,6 +33,8 @@ type transactions struct { responses messageList onTransaction transactionHandler + + watcher procs.ProcessesWatcher } type transactionConfig struct { @@ -46,8 +48,9 @@ type messageList struct { head, tail *message } -func (trans *transactions) init(c *transactionConfig, cb transactionHandler) { +func (trans *transactions) init(c *transactionConfig, watcher procs.ProcessesWatcher, cb transactionHandler) { trans.config = c + trans.watcher = watcher trans.onTransaction = cb } @@ -59,7 +62,7 @@ func (trans *transactions) onMessage( var err error msg.Tuple = *tuple msg.Transport = applayer.TransportTCP - msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(&msg.Tuple) + msg.CmdlineTuple = trans.watcher.FindProcessesTupleTCP(&msg.Tuple) if msg.IsRequest { if isDebug { diff --git a/packetbeat/protos/dhcpv4/dhcpv4.go b/packetbeat/protos/dhcpv4/dhcpv4.go index 10d299aea76..323f46f2a54 100644 --- a/packetbeat/protos/dhcpv4/dhcpv4.go +++ b/packetbeat/protos/dhcpv4/dhcpv4.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/beats/v7/packetbeat/pb" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/ecs/code/go/ecs" ) @@ -45,12 +46,13 @@ func init() { func New( testMode bool, results protos.Reporter, 
+ watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { - return newPlugin(testMode, results, cfg) + return newPlugin(testMode, results, watcher, cfg) } -func newPlugin(testMode bool, results protos.Reporter, cfg *common.Config) (*dhcpv4Plugin, error) { +func newPlugin(testMode bool, results protos.Reporter, watcher procs.ProcessesWatcher, cfg *common.Config) (*dhcpv4Plugin, error) { config := defaultConfig if !testMode { @@ -62,14 +64,16 @@ func newPlugin(testMode bool, results protos.Reporter, cfg *common.Config) (*dhc return &dhcpv4Plugin{ dhcpv4Config: config, report: results, + watcher: watcher, log: logp.NewLogger("dhcpv4"), }, nil } type dhcpv4Plugin struct { dhcpv4Config - report protos.Reporter - log *logp.Logger + report protos.Reporter + watcher procs.ProcessesWatcher + log *logp.Logger } func (p *dhcpv4Plugin) GetPorts() []int { diff --git a/packetbeat/protos/dhcpv4/dhcpv4_test.go b/packetbeat/protos/dhcpv4/dhcpv4_test.go index 704c4d2bece..d34019323c0 100644 --- a/packetbeat/protos/dhcpv4/dhcpv4_test.go +++ b/packetbeat/protos/dhcpv4/dhcpv4_test.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/publish" ) @@ -81,7 +82,7 @@ var ( func TestParseDHCPRequest(t *testing.T) { logp.TestingSetup() - p, err := newPlugin(true, nil, nil) + p, err := newPlugin(true, nil, procs.ProcessesWatcher{}, nil) if err != nil { t.Fatal(err) } @@ -165,7 +166,7 @@ func TestParseDHCPRequest(t *testing.T) { } func TestParseDHCPACK(t *testing.T) { - p, err := newPlugin(true, nil, nil) + p, err := newPlugin(true, nil, procs.ProcessesWatcher{}, nil) if err != nil { t.Fatal(err) } diff --git a/packetbeat/protos/dns/dns.go b/packetbeat/protos/dns/dns.go index 8fbf402b6b4..15aa154276f 100644 --- a/packetbeat/protos/dns/dns.go +++ b/packetbeat/protos/dns/dns.go @@ -38,6 +38,7 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/beats/v7/packetbeat/pb" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" ) @@ -55,6 +56,7 @@ type dnsPlugin struct { transactionTimeout time.Duration results protos.Reporter // Channel where results are pushed. 
+ watcher procs.ProcessesWatcher } var ( @@ -220,6 +222,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &dnsPlugin{} @@ -230,13 +233,13 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (dns *dnsPlugin) init(results protos.Reporter, config *dnsConfig) error { +func (dns *dnsPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *dnsConfig) error { dns.setFromConfig(config) dns.transactions = common.NewCacheWithRemovalListener( dns.transactionTimeout, @@ -252,6 +255,7 @@ func (dns *dnsPlugin) init(results protos.Reporter, config *dnsConfig) error { dns.transactions.StartJanitor(dns.transactionTimeout) dns.results = results + dns.watcher = watcher return nil } diff --git a/packetbeat/protos/dns/dns_tcp.go b/packetbeat/protos/dns/dns_tcp.go index 310cf43553e..bbf7e736926 100644 --- a/packetbeat/protos/dns/dns_tcp.go +++ b/packetbeat/protos/dns/dns_tcp.go @@ -23,7 +23,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" @@ -150,7 +149,7 @@ func (dns *dnsPlugin) handleDNS(conn *dnsConnectionData, tcpTuple *common.TCPTup message := conn.data[dir].message dnsTuple := dnsTupleFromIPPort(&message.tuple, transportTCP, decodedData.Id) - message.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcpTuple.IPPort()) + message.cmdlineTuple = dns.watcher.FindProcessesTupleTCP(tcpTuple.IPPort()) message.data = decodedData message.length += decodeOffset diff --git a/packetbeat/protos/dns/dns_test.go b/packetbeat/protos/dns/dns_test.go index c5ee52eb5eb..17303783fca 100644 --- a/packetbeat/protos/dns/dns_test.go +++ b/packetbeat/protos/dns/dns_test.go @@ -34,6 +34,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/publish" ) @@ -111,7 +112,7 @@ func newDNS(store *eventStore, verbose bool) *dnsPlugin { "send_request": true, "send_response": true, }) - dns, err := New(false, callback, cfg) + dns, err := New(false, callback, procs.ProcessesWatcher{}, cfg) if err != nil { panic(err) } diff --git a/packetbeat/protos/dns/dns_udp.go b/packetbeat/protos/dns/dns_udp.go index 652e03bb717..c1a22c7536f 100644 --- a/packetbeat/protos/dns/dns_udp.go +++ b/packetbeat/protos/dns/dns_udp.go @@ -20,7 +20,6 @@ package dns import ( "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" ) @@ -47,7 +46,7 @@ func (dns *dnsPlugin) ParseUDP(pkt *protos.Packet) { dnsMsg := &dnsMessage{ ts: pkt.Ts, tuple: pkt.Tuple, - cmdlineTuple: procs.ProcWatcher.FindProcessesTupleUDP(&pkt.Tuple), + cmdlineTuple: dns.watcher.FindProcessesTupleUDP(&pkt.Tuple), data: dnsPkt, length: packetSize, } diff --git a/packetbeat/protos/http/http.go b/packetbeat/protos/http/http.go index 4b2367c0239..3dd7484822e 100644 --- a/packetbeat/protos/http/http.go +++ b/packetbeat/protos/http/http.go @@ -97,6 +97,7 @@ type httpPlugin struct { transactionTimeout time.Duration results protos.Reporter + watcher 
procs.ProcessesWatcher } var ( @@ -111,6 +112,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &httpPlugin{} @@ -121,19 +123,20 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } // Init initializes the HTTP protocol analyser. -func (http *httpPlugin) init(results protos.Reporter, config *httpConfig) error { +func (http *httpPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *httpConfig) error { http.setFromConfig(config) isDebug = logp.IsDebug("http") isDetailed = logp.IsDebug("httpdetailed") http.results = results + http.watcher = watcher return nil } @@ -435,7 +438,7 @@ func (http *httpPlugin) handleHTTP( m.tcpTuple = *tcptuple m.direction = dir - m.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + m.cmdlineTuple = http.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) http.hideHeaders(m) if m.isRequest { diff --git a/packetbeat/protos/http/http_test.go b/packetbeat/protos/http/http_test.go index 69c8cd792ed..42f5d366c2f 100644 --- a/packetbeat/protos/http/http_test.go +++ b/packetbeat/protos/http/http_test.go @@ -33,6 +33,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/publish" ) @@ -88,7 +89,7 @@ func httpModForTests(store *eventStore) *httpPlugin { callback = store.publish } - http, err := New(false, callback, common.NewConfig()) + http, err := New(false, callback, procs.ProcessesWatcher{}, common.NewConfig()) if err != nil { panic(err) } diff --git a/packetbeat/protos/icmp/icmp.go b/packetbeat/protos/icmp/icmp.go index 6fb210fd871..f86dd291886 100644 --- a/packetbeat/protos/icmp/icmp.go +++ b/packetbeat/protos/icmp/icmp.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/packetbeat/flows" "github.com/elastic/beats/v7/packetbeat/pb" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/tsg/gopacket/layers" @@ -45,6 +46,7 @@ type icmpPlugin struct { transactionTimeout time.Duration results protos.Reporter + watcher procs.ProcessesWatcher } type ICMPv4Processor interface { @@ -74,7 +76,7 @@ var ( duplicateRequests = monitoring.NewInt(nil, "icmp.duplicate_requests") ) -func New(testMode bool, results protos.Reporter, cfg *common.Config) (*icmpPlugin, error) { +func New(testMode bool, results protos.Reporter, watcher procs.ProcessesWatcher, cfg *common.Config) (*icmpPlugin, error) { p := &icmpPlugin{} config := defaultConfig if !testMode { @@ -83,13 +85,13 @@ func New(testMode bool, results protos.Reporter, cfg *common.Config) (*icmpPlugi } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (icmp *icmpPlugin) init(results protos.Reporter, config *icmpConfig) error { +func (icmp *icmpPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *icmpConfig) error { icmp.setFromConfig(config) var err error @@ -112,6 +114,7 @@ func (icmp *icmpPlugin) init(results protos.Reporter, config *icmpConfig) error icmp.transactions.StartJanitor(icmp.transactionTimeout) icmp.results = results + icmp.watcher = watcher return nil } 
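From this point on, the diff applies the same mechanical change to each protocol analyzer: the package-level `procs.ProcWatcher` global is removed and a `procs.ProcessesWatcher` is passed into each plugin's `New`/`init`, which then calls `FindProcessesTupleTCP`/`FindProcessesTupleUDP` on the injected value. A minimal sketch of that injection shape (illustrative types only, not the real plugin code):

package main

import "fmt"

// watcher stands in for procs.ProcessesWatcher: it resolves a connection
// tuple to the local process that owns the socket.
type watcher struct{ name string }

func (w watcher) FindProcess(tuple string) string {
	return w.name + " resolved " + tuple
}

// plugin mirrors the post-refactor analyzers: the watcher is a field set at
// construction time instead of a shared package-level global.
type plugin struct {
	watcher watcher
}

func newPlugin(w watcher) *plugin { return &plugin{watcher: w} }

func (p *plugin) handle(tuple string) {
	// Before the refactor this would have been a call on the global ProcWatcher.
	fmt.Println(p.watcher.FindProcess(tuple))
}

func main() {
	// Tests can now pass a zero-value or fake watcher instead of mutating a global.
	p := newPlugin(watcher{name: "test-watcher"})
	p.handle("10.0.0.1:80 -> 10.0.0.2:51432")
}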
diff --git a/packetbeat/protos/icmp/icmp_test.go b/packetbeat/protos/icmp/icmp_test.go index 3ad537fa7d4..fc9508fbcdc 100644 --- a/packetbeat/protos/icmp/icmp_test.go +++ b/packetbeat/protos/icmp/icmp_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/tsg/gopacket" @@ -60,7 +61,7 @@ func TestIcmpDirection(t *testing.T) { func BenchmarkIcmpProcessICMPv4(b *testing.B) { logp.TestingSetup(logp.WithSelectors("icmp", "icmpdetailed")) - icmp, err := New(true, func(beat.Event) {}, common.NewConfig()) + icmp, err := New(true, func(beat.Event) {}, procs.ProcessesWatcher{}, common.NewConfig()) if err != nil { b.Error("Failed to create ICMP processor") return diff --git a/packetbeat/protos/memcache/memcache.go b/packetbeat/protos/memcache/memcache.go index e59550287a5..39bfccd255a 100644 --- a/packetbeat/protos/memcache/memcache.go +++ b/packetbeat/protos/memcache/memcache.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/applayer" ) @@ -38,6 +39,7 @@ import ( type memcache struct { ports protos.PortsConfig results protos.Reporter + watcher procs.ProcessesWatcher config parserConfig udpMemcache @@ -131,6 +133,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &memcache{} @@ -141,14 +144,14 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } // Called to initialize the Plugin -func (mc *memcache) init(results protos.Reporter, config *memcacheConfig) error { +func (mc *memcache) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *memcacheConfig) error { debug("init memcache plugin") mc.handler = mc @@ -158,6 +161,7 @@ func (mc *memcache) init(results protos.Reporter, config *memcacheConfig) error mc.udpConnections = make(map[common.HashableIPPortTuple]*udpConnection) mc.results = results + mc.watcher = watcher return nil } diff --git a/packetbeat/protos/memcache/memcache_test.go b/packetbeat/protos/memcache/memcache_test.go index 7e6f61cd67e..486e4eccbd0 100644 --- a/packetbeat/protos/memcache/memcache_test.go +++ b/packetbeat/protos/memcache/memcache_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/packetbeat/procs" ) type memcacheTest struct { @@ -36,7 +37,7 @@ type memcacheTest struct { func newMemcacheTest(config memcacheConfig) *memcacheTest { mct := &memcacheTest{} mc := &memcache{} - mc.init(nil, &config) + mc.init(nil, procs.ProcessesWatcher{}, &config) mc.handler = mct mct.mc = mc return mct diff --git a/packetbeat/protos/memcache/plugin_tcp.go b/packetbeat/protos/memcache/plugin_tcp.go index e9dded17dd6..830a0cd64a5 100644 --- a/packetbeat/protos/memcache/plugin_tcp.go +++ b/packetbeat/protos/memcache/plugin_tcp.go @@ -25,7 +25,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" 
"github.com/elastic/beats/v7/packetbeat/protos/applayer" "github.com/elastic/beats/v7/packetbeat/protos/tcp" @@ -191,7 +190,7 @@ func (mc *memcache) onTCPMessage( ) error { msg.Tuple = *tuple msg.Transport = applayer.TransportTCP - msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tuple) + msg.CmdlineTuple = mc.watcher.FindProcessesTupleTCP(tuple) if msg.IsRequest { return mc.onTCPRequest(conn, tuple, dir, msg) diff --git a/packetbeat/protos/memcache/plugin_udp.go b/packetbeat/protos/memcache/plugin_udp.go index 850c6e421fb..441b286a49e 100644 --- a/packetbeat/protos/memcache/plugin_udp.go +++ b/packetbeat/protos/memcache/plugin_udp.go @@ -27,7 +27,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common/streambuf" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/applayer" ) @@ -184,7 +183,7 @@ func (mc *memcache) onUDPMessage( } msg.Tuple = *tuple msg.Transport = applayer.TransportUDP - msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTupleUDP(tuple) + msg.CmdlineTuple = mc.watcher.FindProcessesTupleUDP(tuple) done := false var err error diff --git a/packetbeat/protos/mongodb/mongodb.go b/packetbeat/protos/mongodb/mongodb.go index ac3e66dca5e..28a9350840e 100644 --- a/packetbeat/protos/mongodb/mongodb.go +++ b/packetbeat/protos/mongodb/mongodb.go @@ -47,6 +47,7 @@ type mongodbPlugin struct { transactionTimeout time.Duration results protos.Reporter + watcher procs.ProcessesWatcher } type transactionKey struct { @@ -65,6 +66,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &mongodbPlugin{} @@ -75,13 +77,13 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (mongodb *mongodbPlugin) init(results protos.Reporter, config *mongodbConfig) error { +func (mongodb *mongodbPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *mongodbConfig) error { debugf("Init a MongoDB protocol parser") mongodb.setFromConfig(config) @@ -94,6 +96,7 @@ func (mongodb *mongodbPlugin) init(results protos.Reporter, config *mongodbConfi protos.DefaultTransactionHashSize) mongodb.responses.StartJanitor(mongodb.transactionTimeout) mongodb.results = results + mongodb.watcher = watcher return nil } @@ -218,7 +221,7 @@ func (mongodb *mongodbPlugin) handleMongodb( m.tcpTuple = *tcptuple m.direction = dir - m.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + m.cmdlineTuple = mongodb.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) if m.isResponse { debugf("MongoDB response message") diff --git a/packetbeat/protos/mongodb/mongodb_test.go b/packetbeat/protos/mongodb/mongodb_test.go index 4bd16ec121d..53580594ff2 100644 --- a/packetbeat/protos/mongodb/mongodb_test.go +++ b/packetbeat/protos/mongodb/mongodb_test.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" ) @@ -46,7 +47,7 @@ func mongodbModForTests() (*eventStore, *mongodbPlugin) { var mongodb mongodbPlugin results := &eventStore{} config := defaultConfig - mongodb.init(results.publish, &config) + mongodb.init(results.publish, procs.ProcessesWatcher{}, 
&config) return results, &mongodb } diff --git a/packetbeat/protos/mysql/mysql.go b/packetbeat/protos/mysql/mysql.go index 4d08debf976..506b6c30ca8 100644 --- a/packetbeat/protos/mysql/mysql.go +++ b/packetbeat/protos/mysql/mysql.go @@ -158,6 +158,7 @@ type mysqlPlugin struct { prepareStatementTimeout time.Duration results protos.Reporter + watcher procs.ProcessesWatcher // function pointer for mocking handleMysql func(mysql *mysqlPlugin, m *mysqlMessage, tcp *common.TCPTuple, @@ -171,6 +172,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &mysqlPlugin{} @@ -181,13 +183,13 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (mysql *mysqlPlugin) init(results protos.Reporter, config *mysqlConfig) error { +func (mysql *mysqlPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *mysqlConfig) error { mysql.setFromConfig(config) mysql.transactions = common.NewCache( @@ -203,6 +205,7 @@ func (mysql *mysqlPlugin) init(results protos.Reporter, config *mysqlConfig) err mysql.handleMysql = handleMysql mysql.results = results + mysql.watcher = watcher return nil } @@ -651,7 +654,7 @@ func handleMysql(mysql *mysqlPlugin, m *mysqlMessage, tcptuple *common.TCPTuple, m.tcpTuple = *tcptuple m.direction = dir - m.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + m.cmdlineTuple = mysql.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) m.raw = rawMsg if m.isRequest { diff --git a/packetbeat/protos/mysql/mysql_test.go b/packetbeat/protos/mysql/mysql_test.go index 9bdfdb2cf07..1cbce1df1af 100644 --- a/packetbeat/protos/mysql/mysql_test.go +++ b/packetbeat/protos/mysql/mysql_test.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" "github.com/elastic/beats/v7/packetbeat/publish" @@ -60,7 +61,7 @@ func mysqlModForTests(store *eventStore) *mysqlPlugin { var mysql mysqlPlugin config := defaultConfig config.Ports = []int{serverPort} - mysql.init(callback, &config) + mysql.init(callback, procs.ProcessesWatcher{}, &config) return &mysql } diff --git a/packetbeat/protos/nfs/rpc.go b/packetbeat/protos/nfs/rpc.go index f115cf19fba..9cde7ab5aac 100644 --- a/packetbeat/protos/nfs/rpc.go +++ b/packetbeat/protos/nfs/rpc.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" ) @@ -70,6 +71,7 @@ func init() { func New( testMode bool, results protos.Reporter, + _ procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &rpc{} diff --git a/packetbeat/protos/pgsql/pgsql.go b/packetbeat/protos/pgsql/pgsql.go index 69bfb468887..5ad6f6e305a 100644 --- a/packetbeat/protos/pgsql/pgsql.go +++ b/packetbeat/protos/pgsql/pgsql.go @@ -49,6 +49,7 @@ type pgsqlPlugin struct { transactionTimeout time.Duration results protos.Reporter + watcher procs.ProcessesWatcher // function pointer for mocking handlePgsql func(pgsql *pgsqlPlugin, m *pgsqlMessage, tcp *common.TCPTuple, @@ -140,6 +141,7 @@ func init() { func New( testMode bool, results 
protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &pgsqlPlugin{} @@ -150,13 +152,13 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (pgsql *pgsqlPlugin) init(results protos.Reporter, config *pgsqlConfig) error { +func (pgsql *pgsqlPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *pgsqlConfig) error { pgsql.setFromConfig(config) pgsql.log = logp.NewLogger("pgsql") @@ -170,6 +172,7 @@ func (pgsql *pgsqlPlugin) init(results protos.Reporter, config *pgsqlConfig) err pgsql.transactions.StartJanitor(pgsql.transactionTimeout) pgsql.handlePgsql = handlePgsql pgsql.results = results + pgsql.watcher = watcher return nil } @@ -379,7 +382,7 @@ var handlePgsql = func(pgsql *pgsqlPlugin, m *pgsqlMessage, tcptuple *common.TCP m.tcpTuple = *tcptuple m.direction = dir - m.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + m.cmdlineTuple = pgsql.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) if m.isRequest { pgsql.receivedPgsqlRequest(m) diff --git a/packetbeat/protos/pgsql/pgsql_test.go b/packetbeat/protos/pgsql/pgsql_test.go index 356d367c391..199c2fc3b38 100644 --- a/packetbeat/protos/pgsql/pgsql_test.go +++ b/packetbeat/protos/pgsql/pgsql_test.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/publish" ) @@ -56,7 +57,7 @@ func pgsqlModForTests(store *eventStore) *pgsqlPlugin { var pgsql pgsqlPlugin config := defaultConfig - pgsql.init(callback, &config) + pgsql.init(callback, procs.ProcessesWatcher{}, &config) return &pgsql } diff --git a/packetbeat/protos/protos.go b/packetbeat/protos/protos.go index 9991458eb2b..e0343a0ee87 100644 --- a/packetbeat/protos/protos.go +++ b/packetbeat/protos/protos.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" ) const ( @@ -92,11 +93,12 @@ type ProtocolsStruct struct { udp map[Protocol]UDPPlugin } -// Singleton of Protocols type. 
-var Protos = ProtocolsStruct{ - all: map[Protocol]protocolInstance{}, - tcp: map[Protocol]TCPPlugin{}, - udp: map[Protocol]UDPPlugin{}, +func NewProtocols() *ProtocolsStruct { + return &ProtocolsStruct{ + all: map[Protocol]protocolInstance{}, + tcp: map[Protocol]TCPPlugin{}, + udp: map[Protocol]UDPPlugin{}, + } } type protocolInstance struct { @@ -111,6 +113,7 @@ type reporterFactory interface { func (s ProtocolsStruct) Init( testMode bool, pub reporterFactory, + watcher procs.ProcessesWatcher, configs map[string]*common.Config, listConfigs []*common.Config, ) error { @@ -123,7 +126,7 @@ func (s ProtocolsStruct) Init( } for name, config := range configs { - if err := s.configureProtocol(testMode, pub, name, config); err != nil { + if err := s.configureProtocol(testMode, pub, watcher, name, config); err != nil { return err } } @@ -136,7 +139,7 @@ func (s ProtocolsStruct) Init( return err } - if err := s.configureProtocol(testMode, pub, module.Name, config); err != nil { + if err := s.configureProtocol(testMode, pub, watcher, module.Name, config); err != nil { return err } } @@ -147,6 +150,7 @@ func (s ProtocolsStruct) Init( func (s ProtocolsStruct) configureProtocol( testMode bool, pub reporterFactory, + watcher procs.ProcessesWatcher, name string, config *common.Config, ) error { @@ -182,7 +186,7 @@ func (s ProtocolsStruct) configureProtocol( } } - inst, err := plugin(testMode, results, config) + inst, err := plugin(testMode, results, watcher, config) if err != nil { logp.Err("Failed to register protocol plugin: %v", err) return err diff --git a/packetbeat/protos/redis/redis.go b/packetbeat/protos/redis/redis.go index bf23e94836f..23dd1ad8696 100644 --- a/packetbeat/protos/redis/redis.go +++ b/packetbeat/protos/redis/redis.go @@ -55,6 +55,7 @@ type redisPlugin struct { transactionTimeout time.Duration queueConfig MessageQueueConfig + watcher procs.ProcessesWatcher results protos.Reporter } @@ -75,6 +76,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &redisPlugin{} @@ -85,16 +87,17 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (redis *redisPlugin) init(results protos.Reporter, config *redisConfig) error { +func (redis *redisPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *redisConfig) error { redis.setFromConfig(config) redis.results = results + redis.watcher = watcher isDebug = logp.IsDebug("redis") return nil @@ -247,7 +250,7 @@ func (redis *redisPlugin) handleRedis( ) { m.tcpTuple = *tcptuple m.direction = dir - m.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + m.cmdlineTuple = redis.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) if m.isRequest { // wait for response diff --git a/packetbeat/protos/registry.go b/packetbeat/protos/registry.go index f1fc17b7074..1d1bd2c7b88 100644 --- a/packetbeat/protos/registry.go +++ b/packetbeat/protos/registry.go @@ -22,11 +22,14 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + + "github.com/elastic/beats/v7/packetbeat/procs" ) type ProtocolPlugin func( testMode bool, results Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (Plugin, error) diff --git a/packetbeat/protos/sip/parser.go b/packetbeat/protos/sip/parser.go index 55e66045e95..7ee5a10bb5b 100644 --- a/packetbeat/protos/sip/parser.go +++ 
b/packetbeat/protos/sip/parser.go @@ -87,6 +87,7 @@ const ( ) type parser struct { + watcher procs.ProcessesWatcher } type parsingInfo struct { @@ -117,15 +118,17 @@ var ( nameVia = []byte("via") ) -func newParser() *parser { - return &parser{} +func newParser(watcher procs.ProcessesWatcher) *parser { + return &parser{ + watcher: watcher, + } } func (parser *parser) parse(pi *parsingInfo) (*message, error) { m := &message{ ts: pi.pkt.Ts, ipPortTuple: pi.pkt.Tuple, - cmdlineTuple: procs.ProcWatcher.FindProcessesTupleTCP(&pi.pkt.Tuple), + cmdlineTuple: parser.watcher.FindProcessesTupleTCP(&pi.pkt.Tuple), rawData: pi.data, } for pi.parseOffset < len(pi.data) { diff --git a/packetbeat/protos/sip/plugin.go b/packetbeat/protos/sip/plugin.go index 7da4d0f1e7e..b9b1264f967 100644 --- a/packetbeat/protos/sip/plugin.go +++ b/packetbeat/protos/sip/plugin.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/packetbeat/pb" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" ) @@ -45,6 +46,7 @@ type plugin struct { keepOriginal bool results protos.Reporter + watcher procs.ProcessesWatcher } var ( @@ -59,6 +61,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { cfgwarn.Beta("packetbeat SIP protocol is used") @@ -71,19 +74,20 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } // Init initializes the HTTP protocol analyser. -func (p *plugin) init(results protos.Reporter, config *config) error { +func (p *plugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *config) error { p.setFromConfig(config) isDebug = logp.IsDebug("sip") isDetailed = logp.IsDebug("sipdetailed") p.results = results + p.watcher = watcher return nil } @@ -111,7 +115,7 @@ func (p *plugin) doParse(pkt *protos.Packet) error { detailedf("Payload received: [%s]", pkt.Payload) } - parser := newParser() + parser := newParser(p.watcher) pi := newParsingInfo(pkt) m, err := parser.parse(pi) diff --git a/packetbeat/protos/sip/plugin_test.go b/packetbeat/protos/sip/plugin_test.go index d8c09f5b307..5b09f522aff 100644 --- a/packetbeat/protos/sip/plugin_test.go +++ b/packetbeat/protos/sip/plugin_test.go @@ -27,6 +27,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" ) @@ -114,7 +115,7 @@ func TestParseUDP(t *testing.T) { gotEvent = &evt } const data = "INVITE sip:test@10.0.2.15:5060 SIP/2.0\r\nVia: SIP/2.0/UDP 10.0.2.20:5060;branch=z9hG4bK-2187-1-0\r\nFrom: \"DVI4/8000\" ;tag=1\r\nTo: test \r\nCall-ID: 1-2187@10.0.2.20\r\nCSeq: 1 INVITE\r\nContact: sip:sipp@10.0.2.20:5060\r\nMax-Forwards: 70\r\nContent-Type: application/sdp\r\nContent-Length: 123\r\n\r\nv=0\r\no=- 42 42 IN IP4 10.0.2.20\r\ns=-\r\nc=IN IP4 10.0.2.20\r\nt=0 0\r\nm=audio 6000 RTP/AVP 5\r\na=rtpmap:5 DVI4/8000\r\na=recvonly\r\n" - p, _ := New(true, reporter, nil) + p, _ := New(true, reporter, procs.ProcessesWatcher{}, nil) plugin := p.(*plugin) plugin.ParseUDP(&protos.Packet{ Ts: time.Now(), diff --git a/packetbeat/protos/tcp/tcp_test.go b/packetbeat/protos/tcp/tcp_test.go index c3461172937..4ec5559f3f6 100644 --- a/packetbeat/protos/tcp/tcp_test.go +++ 
b/packetbeat/protos/tcp/tcp_test.go @@ -26,6 +26,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/stretchr/testify/assert" @@ -44,7 +45,7 @@ var ( ) func init() { - new := func(_ bool, _ protos.Reporter, _ *common.Config) (protos.Plugin, error) { + new := func(_ bool, _ protos.Reporter, _ procs.ProcessesWatcher, _ *common.Config) (protos.Plugin, error) { return &TestProtocol{}, nil } diff --git a/packetbeat/protos/thrift/thrift.go b/packetbeat/protos/thrift/thrift.go index 8c15b9bdf9c..d9778031d76 100644 --- a/packetbeat/protos/thrift/thrift.go +++ b/packetbeat/protos/thrift/thrift.go @@ -57,6 +57,7 @@ type thriftPlugin struct { publishQueue chan *thriftTransaction results protos.Reporter + watcher procs.ProcessesWatcher idl *thriftIdl } @@ -182,6 +183,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &thriftPlugin{} @@ -192,7 +194,7 @@ func New( } } - if err := p.init(testMode, results, &config); err != nil { + if err := p.init(testMode, results, watcher, &config); err != nil { return nil, err } return p, nil @@ -201,6 +203,7 @@ func New( func (thrift *thriftPlugin) init( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, config *thriftConfig, ) error { thrift.InitDefaults() @@ -218,6 +221,7 @@ func (thrift *thriftPlugin) init( if !testMode { thrift.publishQueue = make(chan *thriftTransaction, 1000) thrift.results = results + thrift.watcher = watcher go thrift.publishTransactions() } @@ -894,7 +898,7 @@ func (thrift *thriftPlugin) messageComplete(tcptuple *common.TCPTuple, dir uint8 // all ok, go to next level stream.message.tcpTuple = *tcptuple stream.message.direction = dir - stream.message.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + stream.message.cmdlineTuple = thrift.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) if stream.message.frameSize == 0 { stream.message.frameSize = uint32(stream.parseOffset - stream.message.start) } diff --git a/packetbeat/protos/thrift/thrift_test.go b/packetbeat/protos/thrift/thrift_test.go index 2c6618bab77..e1eca793e42 100644 --- a/packetbeat/protos/thrift/thrift_test.go +++ b/packetbeat/protos/thrift/thrift_test.go @@ -26,13 +26,14 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" ) func thriftForTests() *thriftPlugin { t := &thriftPlugin{} config := defaultConfig - t.init(true, nil, &config) + t.init(true, nil, procs.ProcessesWatcher{}, &config) return t } diff --git a/packetbeat/protos/tls/tls.go b/packetbeat/protos/tls/tls.go index 74034c4afaf..e91c78d69b8 100644 --- a/packetbeat/protos/tls/tls.go +++ b/packetbeat/protos/tls/tls.go @@ -60,6 +60,7 @@ type tlsPlugin struct { fingerprints []*FingerprintAlgorithm transactionTimeout time.Duration results protos.Reporter + watcher procs.ProcessesWatcher } var ( @@ -78,6 +79,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &tlsPlugin{} @@ -88,18 +90,19 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func (plugin *tlsPlugin) init(results protos.Reporter, config 
*tlsConfig) error { +func (plugin *tlsPlugin) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *tlsConfig) error { if err := plugin.setFromConfig(config); err != nil { return err } plugin.results = results + plugin.watcher = watcher isDebug = logp.IsDebug("tls") return nil @@ -178,7 +181,7 @@ func (plugin *tlsPlugin) doParse( st := conn.streams[dir] if st == nil { st = newStream(tcptuple) - st.cmdlineTuple = procs.ProcWatcher.FindProcessesTupleTCP(tcptuple.IPPort()) + st.cmdlineTuple = plugin.watcher.FindProcessesTupleTCP(tcptuple.IPPort()) conn.streams[dir] = st } diff --git a/packetbeat/protos/tls/tls_test.go b/packetbeat/protos/tls/tls_test.go index 64a79024806..4e90d5aaf3c 100644 --- a/packetbeat/protos/tls/tls_test.go +++ b/packetbeat/protos/tls/tls_test.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/publish" ) @@ -66,7 +67,7 @@ func testInit() (*eventStore, *tlsPlugin) { logp.TestingSetup(logp.WithSelectors("tls", "tlsdetailed")) results := &eventStore{} - tls, err := New(true, results.publish, nil) + tls, err := New(true, results.publish, procs.ProcessesWatcher{}, nil) if err != nil { return nil, nil } diff --git a/packetbeat/publish/publish.go b/packetbeat/publish/publish.go index 7890a1c173d..bd904030b59 100644 --- a/packetbeat/publish/publish.go +++ b/packetbeat/publish/publish.go @@ -84,6 +84,7 @@ func (p *TransactionPublisher) CreateReporter( // load and register the module it's fields, tags and processors settings meta := struct { + Index string `config:"index"` Event common.EventMetadata `config:",inline"` Processors processors.PluginConfig `config:"processors"` KeepNull bool `config:"keep_null"` @@ -107,6 +108,9 @@ func (p *TransactionPublisher) CreateReporter( if p.canDrop { clientConfig.PublishMode = beat.DropIfFull } + if meta.Index != "" { + clientConfig.Processing.Meta = common.MapStr{"index": meta.Index} + } client, err := p.pipeline.ConnectWith(clientConfig) if err != nil { diff --git a/packetbeat/scripts/tcp-protocol/{protocol}/trans.go.tmpl b/packetbeat/scripts/tcp-protocol/{protocol}/trans.go.tmpl index 4f7ad362bfe..c25d06f0d65 100644 --- a/packetbeat/scripts/tcp-protocol/{protocol}/trans.go.tmpl +++ b/packetbeat/scripts/tcp-protocol/{protocol}/trans.go.tmpl @@ -16,6 +16,8 @@ type transactions struct { responses messageList onTransaction transactionHandler + + watcher procs.ProcessesWatcher } type transactionConfig struct { @@ -29,8 +31,9 @@ type messageList struct { head, tail *message } -func (trans *transactions) init(c *transactionConfig, cb transactionHandler) { +func (trans *transactions) init(c *transactionConfig, watcher procs.ProcessesWatcher, cb transactionHandler) { trans.config = c + trans.watcher = watcher trans.onTransaction = cb } @@ -43,7 +46,7 @@ func (trans *transactions) onMessage( msg.Tuple = *tuple msg.Transport = applayer.TransportTCP - msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(&msg.Tuple) + msg.CmdlineTuple = trans.watcher.FindProcessesTuple(&msg.Tuple) if msg.IsRequest { if isDebug { diff --git a/packetbeat/scripts/tcp-protocol/{protocol}/{protocol}.go.tmpl b/packetbeat/scripts/tcp-protocol/{protocol}/{protocol}.go.tmpl index f783e840301..8af08842fd4 100644 --- a/packetbeat/scripts/tcp-protocol/{protocol}/{protocol}.go.tmpl +++ 
b/packetbeat/scripts/tcp-protocol/{protocol}/{protocol}.go.tmpl @@ -6,6 +6,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" ) @@ -15,6 +16,7 @@ type {plugin_type} struct { ports protos.PortsConfig parserConfig parserConfig transConfig transactionConfig + watcher procs.ProcessesWatcher pub transPub } @@ -45,6 +47,7 @@ func init() { func New( testMode bool, results protos.Reporter, + watcher procs.ProcessesWatcher, cfg *common.Config, ) (protos.Plugin, error) { p := &{plugin_type}{} @@ -55,17 +58,18 @@ func New( } } - if err := p.init(results, &config); err != nil { + if err := p.init(results, watcher, &config); err != nil { return nil, err } return p, nil } -func ({plugin_var} *{plugin_type}) init(results protos.Reporter, config *{protocol}Config) error { +func ({plugin_var} *{plugin_type}) init(results protos.Reporter, watcher procs.ProcessesWatcher, config *{protocol}Config) error { if err := {plugin_var}.setFromConfig(config); err != nil { return err } {plugin_var}.pub.results = results + {plugin_var}.watcher = watcher isDebug = logp.IsDebug("http") return nil @@ -162,7 +166,7 @@ func ({plugin_var} *{plugin_type}) ensureConnection(private protos.ProtocolData) conn := getConnection(private) if conn == nil { conn = &connection{} - conn.trans.init(&{plugin_var}.transConfig, {plugin_var}.pub.onTransaction) + conn.trans.init(&{plugin_var}.transConfig, {plugin_var}.watcher, {plugin_var}.pub.onTransaction) } return conn } diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml new file mode 100644 index 00000000000..42888c1ec53 --- /dev/null +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -0,0 +1,2092 @@ +###################### Packetbeat Configuration Example ####################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see packetbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/packetbeat/index.html + +# =============================== Network device =============================== + +# Select the network interface to sniff the data. You can use the "any" +# keyword to sniff on all connected interfaces. +packetbeat.interfaces.device: any + +# Packetbeat supports three sniffer types: +# * pcap, which uses the libpcap library and works on most platforms, but it's +# not the fastest option. +# * af_packet, which uses memory-mapped sniffing. This option is faster than +# libpcap and doesn't require a kernel module, but it's Linux-specific. +#packetbeat.interfaces.type: pcap + +# The maximum size of the packets to capture. The default is 65535, which is +# large enough for almost all networks and interface types. If you sniff on a +# physical network interface, the optimal setting is the MTU size. On virtual +# interfaces, however, it's safer to accept the default value. +#packetbeat.interfaces.snaplen: 65535 + +# The maximum size of the shared memory buffer to use between the kernel and +# user space. A bigger buffer usually results in lower CPU usage, but consumes +# more memory. This setting is only available for the af_packet sniffer type. +# The default is 30 MB. 
+#packetbeat.interfaces.buffer_size_mb: 30
+
+# Packetbeat automatically generates a BPF filter for capturing only the traffic on
+# ports where it expects to find known protocols. Use this setting to tell
+# Packetbeat to generate a BPF filter that accepts VLAN tags.
+#packetbeat.interfaces.with_vlans: true
+
+# Use this setting to override the automatically generated BPF filter.
+#packetbeat.interfaces.bpf_filter:
+
+# With `auto_promisc_mode` Packetbeat puts the interface in promiscuous mode automatically on startup.
+# This option does not work with the `any` interface device.
+# The default option is false and requires manual set-up of promiscuous mode.
+# Warning: under some circumstances (e.g. a Beat crash) promiscuous mode
+# can stay enabled even after the Beat is shut down.
+#packetbeat.interfaces.auto_promisc_mode: true
+
+# =================================== Flows ====================================
+
+packetbeat.flows:
+  # Enable network flows. Default: true
+  #enabled: true
+
+  # Set the network flow timeout. A flow is killed if no packet is received
+  # before it times out.
+  timeout: 30s
+
+  # Configure the reporting period. If set to -1, only killed flows are reported.
+  period: 10s
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Overrides where flow events are indexed.
+  #index: my-custom-flow-index
+
+# =========================== Transaction protocols ============================
+
+packetbeat.protocols:
+- type: icmp
+  # Enable ICMPv4 and ICMPv6 monitoring. Default: true
+  #enabled: true
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-icmp-index
+
+- type: amqp
+  # Enable AMQP monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for AMQP traffic. You can disable
+  # the AMQP protocol by commenting out the list of ports.
+  ports: [5672]
+  # Truncate messages that are published and avoid huge messages being
+  # indexed.
+  # Default: 1000
+  #max_body_length: 1000
+
+  # Hide the header fields in header frames.
+  # Default: false
+  #parse_headers: false
+
+  # Hide the additional arguments of method frames.
+  # Default: false
+  #parse_arguments: false
+
+  # Hide all methods related to connection negotiation between server and
+  # client.
+  # Default: true
+  #hide_connection_information: true
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+  #send_response: false
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-amqp-index
+
+- type: cassandra
+  # Cassandra port for traffic monitoring.
+  ports: [9042]
+
+  # If this option is enabled, the raw message of the request (`cassandra_request` field)
+  # is included in published events. The default is true.
+  #send_request: true
+
+  # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field)
+  # is included in published events. The default is true. Enable `send_request` before enabling this option.
+  #send_request_header: true
+
+  # If this option is enabled, the raw message of the response (`cassandra_response` field)
+  # is included in published events. The default is true.
+  #send_response: true
+
+  # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field)
+  # is included in published events. The default is true. Enable `send_response` before enabling this option.
+  #send_response_header: true
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Configures, by name, the default compression algorithm used to uncompress compressed frames. Currently only `snappy` can be configured.
+  # By default no compressor is configured.
+  #compressor: "snappy"
+
+  # This option indicates which operators will be ignored.
+  #ignored_ops: ["SUPPORTED","OPTIONS"]
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-cassandra-index
+
+- type: dhcpv4
+  # Configure the DHCPv4 ports.
+  ports: [67, 68]
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+- type: dns
+  # Enable DNS monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for DNS traffic. You can disable
+  # the DNS protocol by commenting out the list of ports.
+  ports: [53]
+
+  # include_authorities controls whether or not the dns.authorities field
+  # (authority resource records) is added to messages.
+  # Default: false
+  include_authorities: true
+  # include_additionals controls whether or not the dns.additionals field
+  # (additional resource records) is added to messages.
+  # Default: false
+  include_additionals: true
+
+  # send_request and send_response control whether or not the stringified DNS
+  # request and response message are added to the result.
+  # Nearly all data about the request/response is available in the dns.*
+  # fields, but this can be useful if you need visibility specifically
+  # into the request or the response.
+  # Default: false
+  # send_request: true
+  # send_response: true
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-dns-index
+
+- type: http
+  # Enable HTTP monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for HTTP traffic. You can disable
+  # the HTTP protocol by commenting out the list of ports.
+  ports: [80, 8080, 8000, 5000, 8002]
+
+  # Uncomment the following to hide certain parameters in URLs or forms attached
+  # to HTTP requests. The names of the parameters are case insensitive.
+  # The value of the parameters will be replaced with the 'xxxxx' string.
+  # This is generally useful for avoiding storing user passwords or other
+  # sensitive information.
+  # Only query parameters and top-level form parameters are replaced.
+  # hide_keywords: ['pass', 'password', 'passwd']
+
+  # A list of header names to capture and send to Elasticsearch. These headers
+  # are placed under the `headers` dictionary in the resulting JSON.
+  #send_headers: false
+
+  # Instead of sending a whitelist of headers to Elasticsearch, you can send
+  # all headers by setting this option to true. The default is false.
+  #send_all_headers: false
+
+  # A list of headers to redact if present in the HTTP request. This will keep
+  # the header field present, but will redact its value to show the header's
+  # presence.
+  #redact_headers: []
+
+  # The list of content types for which Packetbeat includes the full HTTP
+  # payload. If the request's or response's Content-Type matches any on this
+  # list, the full body will be included under the request or response field.
+  #include_body_for: []
+
+  # The list of content types for which Packetbeat includes the full HTTP
+  # request payload.
+  #include_request_body_for: []
+
+  # The list of content types for which Packetbeat includes the full HTTP
+  # response payload.
+  #include_response_body_for: []
+
+  # Whether the body of a request must be decoded when a content-encoding
+  # or transfer-encoding has been applied.
+  #decode_body: true
+
+  # If the Cookie or Set-Cookie headers are sent, this option controls whether
+  # they are split into individual values.
+  #split_cookie: false
+
+  # The header field to extract the real IP from. This setting is useful when
+  # you want to capture traffic behind a reverse proxy, but you still want to get the
+  # geo-location information.
+  #real_ip_header:
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+  #send_response: false
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+  # Maximum message size. If an HTTP message is larger than this, it will
+  # be trimmed to this size. Default is 10 MB.
+  #max_message_size: 10485760
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-http-index
+
+- type: memcache
+  # Enable memcache monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for memcache traffic. You can disable
+  # the Memcache protocol by commenting out the list of ports.
+  ports: [11211]
+
+  # Uncomment the parseunknown option to force the memcache text protocol parser
+  # to accept unknown commands.
+  # Note: All unknown commands MUST not contain any data parts!
+  # Default: false
+  # parseunknown: true
+
+  # Update the maxvalue option to store the values - base64 encoded - in the
+  # JSON output.
+  # possible values:
+  # maxvalue: -1 # store all values (text based protocol multi-get)
+  # maxvalue: 0 # store no values at all
+  # maxvalue: N # store up to N values
+  # Default: 0
+  # maxvalues: -1
+
+  # Use maxbytespervalue to limit the number of bytes to be copied per value element.
+  # Note: Values will be base64 encoded, so the actual size in the JSON document
+  # will be 4 times maxbytespervalue.
+  # Default: unlimited
+  # maxbytespervalue: 100
+
+  # UDP transaction timeout in milliseconds.
+  # Note: Quiet messages in the UDP binary protocol will get a response only in the error case.
+  # The memcached analyzer will wait for udptransactiontimeout milliseconds
+  # before publishing quiet messages. Non-quiet messages or quiet requests with
+  # an error response will not have to wait for the timeout.
+  # Default: 200
+  # udptransactiontimeout: 1000
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+ #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Overrides where this protocol's events are indexed. + #index: my-custom-memcache-index + +- type: mysql + # Enable mysql monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for MySQL traffic. You can disable + # the MySQL protocol by commenting out the list of ports. + ports: [3306,3307] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Overrides where this protocol's events are indexed. + #index: my-custom-mysql-index + +- type: pgsql + # Enable pgsql monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Overrides where this protocol's events are indexed. + #index: my-custom-pgsql-index + +- type: redis + # Enable redis monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Max size for per-session message queue. This places a limit on the memory + # that can be used to buffer requests and responses for correlation. + #queue_max_bytes: 1048576 + + # Max number of messages for per-session message queue. This limits the number + # of requests or responses that can be buffered for correlation. Set a value + # large enough to allow for pipelining. 
+ #queue_max_messages: 20000 + + # Overrides where this protocol's events are indexed. + #index: my-custom-redis-index + +- type: thrift + # Enable thrift monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. + ports: [9090] + + # The Thrift transport type. Currently this option accepts the values socket + # for TSocket, which is the default Thrift transport, and framed for the + # TFramed Thrift transport. The default is socket. + #transport_type: socket + + # The Thrift protocol type. Currently the only accepted value is binary for + # the TBinary protocol, which is the default Thrift protocol. + #protocol_type: binary + + # The Thrift interface description language (IDL) files for the service that + # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include + # parameter and exception names. + #idl_files: [] + + # The maximum length for strings in parameters or return values. If a string + # is longer than this value, the string is automatically truncated to this + # length. + #string_max_size: 200 + + # The maximum number of elements in a Thrift list, set, map, or structure. + #collection_max_size: 15 + + # If this option is set to false, Packetbeat decodes the method name from the + # reply and simply skips the rest of the response message. + #capture_reply: true + + # If this option is set to true, Packetbeat replaces all strings found in + # method parameters, return codes, or exception structures with the "*" + # string. + #obfuscate_strings: false + + # The maximum number of fields that a structure can have before Packetbeat + # ignores the whole transaction. + #drop_after_n_struct_fields: 500 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Overrides where this protocol's events are indexed. + #index: my-custom-thrift-index + +- type: mongodb + # Enable mongodb monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. + ports: [27017] + + + # The maximum number of documents from the response to index in the `response` + # field. The default is 10. + #max_docs: 10 + + # The maximum number of characters in a single document indexed in the + # `response` field. The default is 5000. You can set this to 0 to index an + # unlimited number of characters per document. + #max_doc_length: 5000 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. 
Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-mongodb-index
+
+- type: nfs
+  # Enable NFS monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for NFS traffic. You can disable
+  # the NFS protocol by commenting out the list of ports.
+  ports: [2049]
+
+  # If this option is enabled, the raw message of the request (`request` field)
+  # is sent to Elasticsearch. The default is false.
+  #send_request: false
+
+  # If this option is enabled, the raw message of the response (`response`
+  # field) is sent to Elasticsearch. The default is false.
+  #send_response: false
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Transaction timeout. Expired transactions will no longer be correlated to
+  # incoming responses, but sent to Elasticsearch immediately.
+  #transaction_timeout: 10s
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-nfs-index
+
+- type: tls
+  # Enable TLS monitoring. Default: true
+  #enabled: true
+
+  # Configure the ports where to listen for TLS traffic. You can disable
+  # the TLS protocol by commenting out the list of ports.
+  ports:
+    - 443   # HTTPS
+    - 993   # IMAPS
+    - 995   # POP3S
+    - 5223  # XMPP over SSL
+    - 8443
+    - 8883  # Secure MQTT
+    - 9243  # Elasticsearch
+
+  # List of hash algorithms to use to calculate certificates' fingerprints.
+  # Valid values are `sha1`, `sha256` and `md5`.
+  #fingerprints: [sha1]
+
+  # If this option is enabled, the client and server certificates and
+  # certificate chains are sent to Elasticsearch. The default is true.
+  #send_certificates: true
+
+  # If this option is enabled, the raw certificates will be stored
+  # in PEM format under the `raw` key. The default is false.
+  #include_raw_certificates: false
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-tls-index
+
+- type: sip
+  # Configure the ports where to listen for SIP traffic. You can disable the SIP protocol by commenting out the list of ports.
+  ports: [5060]
+
+  # Parse the authorization headers
+  parse_authorization: true
+
+  # Parse body contents (only when body is SDP)
+  parse_body: true
+
+  # Preserve original contents in event.original
+  keep_original: true
+
+  # Overrides where this protocol's events are indexed.
+  #index: my-custom-sip-index
+
+# ============================ Monitored processes =============================
+
+# Packetbeat can enrich events with information about the process associated with
+# the socket that sent or received the packet if Packetbeat is monitoring
+# traffic from the host machine. By default process enrichment is disabled.
+# This feature works on Linux and Windows.
+packetbeat.procs.enabled: false
+
+# If you want to ignore transactions created by the server on which the shipper
+# is installed, you can enable this option. This option is useful to remove
+# duplicates if shippers are installed on multiple servers. The default value is
+# false.
+packetbeat.ignore_outgoing: false
+
+# ================================== General ===================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < `flush.min_events`. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. 
+ #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +# ================================= Processors ================================= + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +# - drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +# - rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +# - add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +# - add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +# - add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +# - add_host_metadata: ~ +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. 
+# +#processors: +# - decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true + +# =============================== Elastic Cloud ================================ + +# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. + +# ---------------------------- Elasticsearch Output ---------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "packetbeat" plus date + # and generates [packetbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. 
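+  # For example, events could be routed through a hypothetical ingest pipeline
+  # named "my-custom-pipeline" by uncommenting the setting below and using that
+  # name as the value.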
+ #pipeline: "" + + # Optional HTTP path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server URL + #proxy_url: http://proxy:3128 + + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. 
Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# ------------------------------ Logstash Output ------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optionally load-balance events between Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to packetbeat + # in all lowercase. + #index: 'packetbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. 
By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +# -------------------------------- Kafka Output -------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from which to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create a unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version Packetbeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Metadata update configuration. Metadata contains leader information + # used to decide which broker to use when publishing. 
+ #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Wait time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. 
+ # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/security/keytabs/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # The service name. Service principal name is contructed from + # service_name/hostname@realm. + #kerberos.service_name: kafka + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# -------------------------------- Redis Output -------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # The list of Redis servers to connect to. If load-balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + # The hosts setting supports redis and rediss urls with custom password like + # redis://:password@localhost:6379. + #hosts: ["localhost:6379"] + + # The name of the Redis list or channel the events are published to. The + # default is packetbeat. + #key: packetbeat + + # The password to authenticate to Redis with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. 
+ #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. 
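+  # For example, a hardened deployment might restrict this list to
+  # [TLSv1.2, TLSv1.3] only (a suggestion, not a requirement).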
+ #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# -------------------------------- File Output --------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/packetbeat" + + # Name of the generated files. The default is `packetbeat` and it generates + # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. + #filename: packetbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every Packetbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + +# ------------------------------- Console Output ------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + +# =================================== Paths ==================================== + +# The home path for the Packetbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the Packetbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the Packetbeat installation. This is the default base path +# for all the files in which Packetbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. 
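+# For example, a deployment with a dedicated data volume might point this at a
+# path such as /var/lib/packetbeat (a hypothetical location).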
+#path.data: ${path.home}/data
+
+# The logs path for a Packetbeat installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+# ================================== Keystore ==================================
+
+# Location of the Keystore containing the keys and their sensitive values.
+#keystore.path: "${path.config}/beats.keystore"
+
+# ================================= Dashboards =================================
+
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: packetbeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
+
+# If true and Kibana is not reachable when the dashboards are loaded, Packetbeat
+# will retry connecting to Kibana instead of exiting with an error.
+#setup.dashboards.retry.enabled: false
+
+# Duration interval between Kibana connection retries.
+#setup.dashboards.retry.interval: 1s
+
+# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
+#setup.dashboards.retry.maximum: 0
+
+# ================================== Template ==================================
+
+# A template is used to set the mapping in Elasticsearch
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+#setup.template.enabled: true
+
+# Select the kind of index template. From Elasticsearch 7.8, it is possible to
+# use component templates. Available options: legacy, component, index.
+# By default packetbeat uses the legacy index templates.
+#setup.template.type: legacy
+
+# Template name. By default the template name is "packetbeat-%{[agent.version]}"
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.name: "packetbeat-%{[agent.version]}"
+
+# Template pattern. By default the template pattern is "packetbeat-%{[agent.version]}-*" to apply to the default index settings.
+# The first part is the name and version of the Beat, and -* is used to match all daily indices.
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.pattern: "packetbeat-%{[agent.version]}-*"
+
+# Path to fields.yml file to generate the template
+#setup.template.fields: "${path.config}/fields.yml"
+
+# A list of fields to be added to the template and Kibana index pattern. Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+#setup.template.append_fields:
+#- name: field_name
+#  type: field_type
+
+# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
+#setup.template.json.enabled: false
+
+# Path to the JSON template file
+#setup.template.json.path: "${path.config}/template.json"
+
+# Name under which the template is stored in Elasticsearch
+#setup.template.json.name: ""
+
+# Overwrite existing template
+# Do not enable this option for more than one instance of packetbeat as it might
+# overload your Elasticsearch with too many update requests.
+#setup.template.overwrite: false
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  #index:
+    #number_of_shards: 1
+    #codec: best_compression
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+# ====================== Index Lifecycle Management (ILM) ======================
+
+# Configure index lifecycle management (ILM). These settings create a write
+# alias and add additional settings to the index template. When ILM is enabled,
+# output.elasticsearch.index is ignored, and the write alias is used to set the
+# index name.
+
+# Enable ILM support. Valid values are true, false, and auto. When set to auto
+# (the default), the Beat uses index lifecycle management when it connects to a
+# cluster that supports ILM; otherwise, it creates daily indices.
+#setup.ilm.enabled: auto
+
+# Set the prefix used in the index lifecycle write alias name. The default alias
+# name is 'packetbeat-%{[agent.version]}'.
+#setup.ilm.rollover_alias: 'packetbeat'
+
+# Set the rollover index pattern. The default is "%{now/d}-000001".
+#setup.ilm.pattern: "{now/d}-000001"
+
+# Set the lifecycle policy name. The default policy name is
+# 'packetbeat'.
+#setup.ilm.policy_name: "mypolicy"
+
+# The path to a JSON file that contains a lifecycle policy configuration. Used
+# to load your own lifecycle policy.
+#setup.ilm.policy_file:
+
+# Set to false to disable the check for an existing lifecycle policy. The
+# default is true. If you disable this check, set setup.ilm.overwrite: true so
+# the lifecycle policy can be installed.
+#setup.ilm.check_exists: true
+
+# Overwrite the lifecycle policy at startup. The default is false.
+#setup.ilm.overwrite: false
+
+# =================================== Kibana ===================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP path + #path: "" + + # Optional Kibana space ID. + #space.id: "" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# ================================== Logging =================================== + +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to stderr. The default is false. +#logging.to_stderr: false + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. 
+#logging.to_eventlog: false + +# If enabled, Packetbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending to the existing + # file. Defaults to true. + # rotateonstartup: true + +# Set to true to log messages in JSON format. +#logging.json: false + +# Set to true, to log messages with minimal required Elastic Common Schema (ECS) +# information. Recommended to use in combination with `logging.json=true` +# Defaults to false. +#logging.ecs: false + +# ============================= X-Pack Monitoring ============================== +# Packetbeat can export internal metrics to a central Elasticsearch monitoring +# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The +# reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#monitoring.enabled: false + +# Sets the UUID of the Elasticsearch cluster under which monitoring data for this +# Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch +# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. +#monitoring.cluster_uuid: + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. +# Note that the settings should point to your Elasticsearch *monitoring* cluster. +# Any setting that is not set is automatically inherited from the Elasticsearch +# output configuration, so if you have the Elasticsearch output configured such +# that it is pointing to your Elasticsearch monitoring cluster, you can simply +# uncomment the following line. +#monitoring.elasticsearch: + + # Array of hosts to connect to. 
+ # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. 
Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # Configure a pin that can be used to do extra validation of the verified certificate chain,
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+  #metrics.period: 10s
+  #state.period: 1m
+
+# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`
+# setting. You can find the value for this setting in the Elastic Cloud web UI.
+#monitoring.cloud.id:
+
+# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`
+# and `monitoring.elasticsearch.password` settings. The format is `:`.
+#monitoring.cloud.auth:
+
+# =============================== HTTP Endpoint ================================
+
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+# Define which user should own the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe. Use the Security
+# Descriptor Definition Language (SDDL) to define the permissions. This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
+# ============================== Process Security ==============================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
+
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for Packetbeat.
+#instrumentation:
+    # Set to true to enable instrumentation of packetbeat.
+    #enabled: false
+
+    # Environment in which Packetbeat is running (e.g. staging, production, etc.)
+    #environment: ""
+
+    # APM Server hosts to report instrumentation results to.
+    #hosts:
+    #  - http://localhost:8200
+
+    # API Key for the APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the APM Server(s).
+    #secret_token:
+
+    # Enable profiling of the server, recording profile samples as events.
+    #
+    # This feature is experimental.
+    #profiling:
+        #cpu:
+            # Set to true to enable CPU profiling.
+            #enabled: false
+            #interval: 60s
+            #duration: 10s
+        #heap:
+            # Set to true to enable heap profiling.
+            #enabled: false
+            #interval: 60s
+
+# ================================= Migration ==================================
+
+# This allows enabling 6.7 migration aliases.
+#migration.6_to_7.enabled: false
+