From 93fdd0a2128eb468bd4f48ea6114ffeee2efc70f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Wed, 10 Mar 2021 17:44:23 +0100 Subject: [PATCH 1/8] Add unit tests for harvester.go of input-logfile package (#24107) ## What does this PR do? This PR adds unit tests for `readerGroup` and `defaultHarvesterGroup` of the `filestream` input. It also creates a separate error for if a Harvester is already running for a source. ## Why is it important? Test coverage for `harvester.go` is increased from 0% to 98.4%. Overall coverage of `github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile` is increased from 47.4% to 64.8%. --- .../internal/input-logfile/harvester.go | 15 +- .../internal/input-logfile/harvester_test.go | 325 ++++++++++++++++++ libbeat/tests/resources/goroutines.go | 33 +- 3 files changed, 364 insertions(+), 9 deletions(-) create mode 100644 filebeat/input/filestream/internal/input-logfile/harvester_test.go diff --git a/filebeat/input/filestream/internal/input-logfile/harvester.go b/filebeat/input/filestream/internal/input-logfile/harvester.go index 72635c194f4..5f926386aa7 100644 --- a/filebeat/input/filestream/internal/input-logfile/harvester.go +++ b/filebeat/input/filestream/internal/input-logfile/harvester.go @@ -19,6 +19,7 @@ package input_logfile import ( "context" + "errors" "fmt" "runtime/debug" "sync" @@ -32,6 +33,10 @@ import ( "github.com/elastic/go-concert/unison" ) +var ( + ErrHarvesterAlreadyRunning = errors.New("harvester is already running for file") +) + // Harvester is the reader which collects the lines from // the configured source. type Harvester interface { @@ -66,7 +71,7 @@ func (r *readerGroup) newContext(id string, cancelation v2.Canceler) (context.Co defer r.mu.Unlock() if _, ok := r.table[id]; ok { - return nil, nil, fmt.Errorf("harvester is already running for file") + return nil, nil, ErrHarvesterAlreadyRunning } ctx, cancel := context.WithCancel(ctxtool.FromCanceller(cancelation)) @@ -88,6 +93,14 @@ func (r *readerGroup) remove(id string) { delete(r.table, id) } +func (r *readerGroup) hasID(id string) bool { + r.mu.Lock() + defer r.mu.Unlock() + + _, ok := r.table[id] + return ok +} + // HarvesterGroup is responsible for running the // Harvesters started by the Prospector. type HarvesterGroup interface { diff --git a/filebeat/input/filestream/internal/input-logfile/harvester_test.go b/filebeat/input/filestream/internal/input-logfile/harvester_test.go new file mode 100644 index 00000000000..6bc6f2f72e6 --- /dev/null +++ b/filebeat/input/filestream/internal/input-logfile/harvester_test.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package input_logfile + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + input "github.com/elastic/beats/v7/filebeat/input/v2" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/tests/resources" + "github.com/elastic/beats/v7/x-pack/dockerlogbeat/pipelinemock" + "github.com/elastic/go-concert/unison" +) + +func TestReaderGroup(t *testing.T) { + requireGroupSuccess := func(t *testing.T, ctx context.Context, cf context.CancelFunc, err error) { + require.NotNil(t, ctx) + require.NotNil(t, cf) + require.Nil(t, err) + } + + requireGroupError := func(t *testing.T, ctx context.Context, cf context.CancelFunc, err error) { + require.Nil(t, ctx) + require.Nil(t, cf) + require.Error(t, err) + } + + t.Run("assert new group is empty", func(t *testing.T) { + rg := newReaderGroup() + require.Equal(t, 0, len(rg.table)) + }) + + t.Run("assert non existent key can be removed", func(t *testing.T) { + rg := newReaderGroup() + require.Equal(t, 0, len(rg.table)) + rg.remove("no such id") + require.Equal(t, 0, len(rg.table)) + }) + + t.Run("assert inserting existing key returns error", func(t *testing.T) { + rg := newReaderGroup() + ctx, cf, err := rg.newContext("test-id", context.Background()) + requireGroupSuccess(t, ctx, cf, err) + require.Equal(t, 1, len(rg.table)) + + newCtx, newCf, err := rg.newContext("test-id", context.Background()) + requireGroupError(t, newCtx, newCf, err) + }) + + t.Run("assert new key is added, can be removed and its context is cancelled", func(t *testing.T) { + rg := newReaderGroup() + ctx, cf, err := rg.newContext("test-id", context.Background()) + requireGroupSuccess(t, ctx, cf, err) + require.Equal(t, 1, len(rg.table)) + + require.Nil(t, ctx.Err()) + rg.remove("test-id") + + require.Equal(t, 0, len(rg.table)) + require.Error(t, ctx.Err(), context.Canceled) + + newCtx, newCf, err := rg.newContext("test-id", context.Background()) + requireGroupSuccess(t, newCtx, newCf, err) + require.Equal(t, 1, len(rg.table)) + require.Nil(t, newCtx.Err()) + }) +} + +func TestDefaultHarvesterGroup(t *testing.T) { + source := &testSource{"/path/to/test"} + + requireSourceAddedToBookkeeper := func(t *testing.T, hg *defaultHarvesterGroup, s Source) { + require.True(t, hg.readers.hasID(s.Name())) + } + + requireSourceRemovedFromBookkeeper := func(t *testing.T, hg *defaultHarvesterGroup, s Source) { + require.False(t, hg.readers.hasID(s.Name())) + } + + t.Run("assert a harvester is started in a goroutine", func(t *testing.T) { + var wg sync.WaitGroup + mockHarvester := &mockHarvester{onRun: correctOnRun, wg: &wg} + hg := testDefaultHarvesterGroup(t, mockHarvester) + + gorountineChecker := resources.NewGoroutinesChecker() + defer gorountineChecker.WaitUntilOriginalCount() + + wg.Add(1) + hg.Start(input.Context{Logger: logp.L(), Cancelation: context.Background()}, source) + + // wait until harvester.Run is done + wg.Wait() + // wait until goroutine that started `harvester.Run` is finished + gorountineChecker.WaitUntilOriginalCount() + + require.Equal(t, 1, mockHarvester.getRunCount()) + + requireSourceRemovedFromBookkeeper(t, hg, source) + // stopped source can be stopped + require.Nil(t, hg.StopGroup()) + }) + + t.Run("assert a harvester can be stopped and removed from bookkeeper", func(t *testing.T) { + mockHarvester := &mockHarvester{onRun: blockUntilCancelOnRun} + hg := testDefaultHarvesterGroup(t, mockHarvester) + + gorountineChecker := resources.NewGoroutinesChecker() + + hg.Start(input.Context{Logger: 
logp.L(), Cancelation: context.Background()}, source) + + gorountineChecker.WaitUntilIncreased(1) + // wait until harvester is started + if mockHarvester.getRunCount() == 1 { + requireSourceAddedToBookkeeper(t, hg, source) + // after started, stop it + hg.Stop(source) + gorountineChecker.WaitUntilOriginalCount() + } + + requireSourceRemovedFromBookkeeper(t, hg, source) + }) + + t.Run("assert a harvester for same source cannot be started", func(t *testing.T) { + mockHarvester := &mockHarvester{onRun: blockUntilCancelOnRun} + hg := testDefaultHarvesterGroup(t, mockHarvester) + inputCtx := input.Context{Logger: logp.L(), Cancelation: context.Background()} + + gorountineChecker := resources.NewGoroutinesChecker() + defer gorountineChecker.WaitUntilOriginalCount() + + hg.Start(inputCtx, source) + hg.Start(inputCtx, source) + + gorountineChecker.WaitUntilIncreased(2) + // error is expected as a harvester group was expected to start twice for the same source + for !hg.readers.hasID(source.Name()) { + } + time.Sleep(3 * time.Millisecond) + + hg.Stop(source) + + err := hg.StopGroup() + require.Error(t, err) + + require.Equal(t, 1, mockHarvester.getRunCount()) + }) + + t.Run("assert a harvester panic is handled", func(t *testing.T) { + mockHarvester := &mockHarvester{onRun: panicOnRun} + hg := testDefaultHarvesterGroup(t, mockHarvester) + defer func() { + if v := recover(); v != nil { + t.Errorf("did not recover from harvester panic in defaultHarvesterGroup") + } + }() + + gorountineChecker := resources.NewGoroutinesChecker() + + hg.Start(input.Context{Logger: logp.L(), Cancelation: context.Background()}, source) + + // wait until harvester is stopped + gorountineChecker.WaitUntilOriginalCount() + + // make sure harvester had run once + require.Equal(t, 1, mockHarvester.getRunCount()) + requireSourceRemovedFromBookkeeper(t, hg, source) + + require.Nil(t, hg.StopGroup()) + }) + + t.Run("assert a harvester error is handled", func(t *testing.T) { + mockHarvester := &mockHarvester{onRun: errorOnRun} + hg := testDefaultHarvesterGroup(t, mockHarvester) + + gorountineChecker := resources.NewGoroutinesChecker() + defer gorountineChecker.WaitUntilOriginalCount() + + hg.Start(input.Context{Logger: logp.L(), Cancelation: context.Background()}, source) + + gorountineChecker.WaitUntilOriginalCount() + + requireSourceRemovedFromBookkeeper(t, hg, source) + + err := hg.StopGroup() + require.Error(t, err) + }) + + t.Run("assert already locked resource has to wait", func(t *testing.T) { + var wg sync.WaitGroup + mockHarvester := &mockHarvester{onRun: correctOnRun, wg: &wg} + hg := testDefaultHarvesterGroup(t, mockHarvester) + inputCtx := input.Context{Logger: logp.L(), Cancelation: context.Background()} + + r, err := lock(inputCtx, hg.store, source.Name()) + if err != nil { + t.Fatalf("cannot lock source") + } + + gorountineChecker := resources.NewGoroutinesChecker() + + wg.Add(1) + hg.Start(inputCtx, source) + + gorountineChecker.WaitUntilIncreased(1) + ok := false + for !ok { + // wait until harvester is added to the bookeeper + ok = hg.readers.hasID(source.Name()) + if ok { + releaseResource(r) + } + } + + // wait until harvester.Run is done + wg.Wait() + // wait until goroutine that started `harvester.Run` is finished + gorountineChecker.WaitUntilOriginalCount() + require.Equal(t, 1, mockHarvester.getRunCount()) + require.Nil(t, hg.StopGroup()) + }) + + t.Run("assert already locked resource has no problem when harvestergroup is cancelled", func(t *testing.T) { + mockHarvester := &mockHarvester{onRun: 
correctOnRun} + hg := testDefaultHarvesterGroup(t, mockHarvester) + inputCtx := input.Context{Logger: logp.L(), Cancelation: context.Background()} + + gorountineChecker := resources.NewGoroutinesChecker() + defer gorountineChecker.WaitUntilOriginalCount() + + r, err := lock(inputCtx, hg.store, source.Name()) + if err != nil { + t.Fatalf("cannot lock source") + } + defer releaseResource(r) + + hg.Start(inputCtx, source) + + gorountineChecker.WaitUntilIncreased(1) + require.Error(t, hg.StopGroup()) + + require.Equal(t, 0, mockHarvester.getRunCount()) + }) +} + +func testDefaultHarvesterGroup(t *testing.T, mockHarvester Harvester) *defaultHarvesterGroup { + return &defaultHarvesterGroup{ + readers: newReaderGroup(), + pipeline: &pipelinemock.MockPipelineConnector{}, + harvester: mockHarvester, + store: testOpenStore(t, "test", nil), + tg: unison.TaskGroup{}, + } +} + +type mockHarvester struct { + mu sync.Mutex + runCount int + + wg *sync.WaitGroup + onRun func(input.Context, Source, Cursor, Publisher) error +} + +func (m *mockHarvester) Run(ctx input.Context, s Source, c Cursor, p Publisher) error { + if m.wg != nil { + defer m.wg.Done() + } + + m.mu.Lock() + m.runCount += 1 + m.mu.Unlock() + + if m.onRun != nil { + return m.onRun(ctx, s, c, p) + } + return nil +} + +func (m *mockHarvester) getRunCount() int { + m.mu.Lock() + defer m.mu.Unlock() + + return m.runCount +} + +func (m *mockHarvester) Test(_ Source, _ input.TestContext) error { return nil } + +func (m *mockHarvester) Name() string { return "mock" } + +func correctOnRun(_ input.Context, _ Source, _ Cursor, _ Publisher) error { + return nil +} + +func blockUntilCancelOnRun(c input.Context, _ Source, _ Cursor, _ Publisher) error { + <-c.Cancelation.Done() + return nil +} + +func errorOnRun(_ input.Context, _ Source, _ Cursor, _ Publisher) error { + return fmt.Errorf("harvester error") +} + +func panicOnRun(_ input.Context, _ Source, _ Cursor, _ Publisher) error { + panic("don't panic") +} diff --git a/libbeat/tests/resources/goroutines.go b/libbeat/tests/resources/goroutines.go index 708ce4c201b..3351c7b2377 100644 --- a/libbeat/tests/resources/goroutines.go +++ b/libbeat/tests/resources/goroutines.go @@ -56,15 +56,11 @@ func (c GoroutinesChecker) Check(t testing.TB) { } func (c GoroutinesChecker) check(t testing.TB) error { - timeout := time.Now().Add(c.FinalizationTimeout) - var after int - for time.Now().Before(timeout) { - after = runtime.NumGoroutine() - if after <= c.before { - return nil - } - time.Sleep(10 * time.Millisecond) + after := c.WaitUntilOriginalCount() + if after == 0 { + return nil } + profile := pprof.Lookup("goroutine") profile.WriteTo(os.Stdout, 2) return fmt.Errorf("Possible goroutines leak, before: %d, after: %d", c.before, after) @@ -78,3 +74,24 @@ func CallAndCheckGoroutines(t testing.TB, f func()) { f() c.Check(t) } + +// WaitUntilOriginalCount waits until the original number of goroutines are +// present before we has created the resource checker. 
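+// A typical use in a test (sketch only, not prescribed by this package):
+// create the checker before starting the goroutines under test, then call
+// WaitUntilOriginalCount (for example via defer) once the work is expected
+// to have stopped; a non-zero return value is the goroutine count that was
+// still observed when the finalization timeout expired.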
+func (c GoroutinesChecker) WaitUntilOriginalCount() int { + timeout := time.Now().Add(c.FinalizationTimeout) + var after int + for time.Now().Before(timeout) { + after = runtime.NumGoroutine() + if after <= c.before { + return 0 + } + time.Sleep(10 * time.Millisecond) + } + return after +} + +func (c *GoroutinesChecker) WaitUntilIncreased(n int) { + for runtime.NumGoroutine() < c.before+n { + time.Sleep(10 * time.Millisecond) + } +} From b10e70d51b036293c8f06b160d5514584fd1476b Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Wed, 10 Mar 2021 11:19:29 -0800 Subject: [PATCH 2/8] Move example to the correct location in reference docs (#24455) --- .../filebeat/docs/inputs/input-aws-s3.asciidoc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index fde7f734d6c..5d10e07a04e 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -45,6 +45,15 @@ call will be interrupted. The default AWS API call timeout for a message is 120 seconds. The minimum is 0 seconds. The maximum is half of the visibility timeout value. +[float] +==== `expand_event_list_from_field` + +If the fileset using this input expects to receive multiple messages bundled +under a specific field then the config option expand_event_list_from_field value +can be assigned the name of the field. This setting will be able to split the +messages under the group value into separate events. For example, CloudTrail logs +are in JSON format and events are found under the JSON object "Records". + ["source","json"] ---- { @@ -65,15 +74,6 @@ is 0 seconds. The maximum is half of the visibility timeout value. } ---- -[float] -==== `expand_event_list_from_field` - -If the fileset using this input expects to receive multiple messages bundled -under a specific field then the config option expand_event_list_from_field value -can be assigned the name of the field. This setting will be able to split the -messages under the group value into separate events. For example, CloudTrail logs -are in JSON format and events are found under the JSON object "Records". - Note: When `expand_event_list_from_field` parameter is given in the config, aws-s3 input will assume the logs are in JSON format and decode them as JSON. Content type will not be checked. From b4ac0eef65d1614f8f96a39f07a9892a79f07d77 Mon Sep 17 00:00:00 2001 From: EamonnTP Date: Thu, 11 Mar 2021 09:11:16 +0000 Subject: [PATCH 3/8] [DOCS] Restructure content for SSL settings (#24342) * Reorg ssl content * Edits following review * Review edits * Updates following review --- libbeat/docs/shared-ssl-config.asciidoc | 431 +++++++++++++++++------- 1 file changed, 317 insertions(+), 114 deletions(-) diff --git a/libbeat/docs/shared-ssl-config.asciidoc b/libbeat/docs/shared-ssl-config.asciidoc index 31eedd1e19a..ec0690397a5 100644 --- a/libbeat/docs/shared-ssl-config.asciidoc +++ b/libbeat/docs/shared-ssl-config.asciidoc @@ -87,25 +87,155 @@ Example module with SSL enabled: ---- endif::[] -[float] -=== Configuration options +There are a number of SSL configuration options available to you: + +* <> +* <> +* <> + +[discrete] +[[ssl-common-config]] +=== Common configuration options -You can specify the following options in the `ssl` section of the +{beatname_lc}.yml+ config file: +Common SSL configuration options can be used in both client and server configurations. 
+You can specify the following options in the `ssl` section of each subsystem that +supports SSL. [float] +[[enabled]] ==== `enabled` -The `enabled` setting can be used to disable the ssl configuration by setting -it to `false`. The default value is `true`. +To disable SSL configuration, set the value to `false`. The default value is `true`. -NOTE: SSL settings are disabled if either `enabled` is set to `false` or the +[NOTE] +===== +SSL settings are disabled if either `enabled` is set to `false` or the `ssl` section is missing. +===== [float] +[[supported-protocols]] +==== `supported_protocols` + +List of allowed SSL/TLS versions. If SSL/TLS server decides for protocol versions +not configured, the connection will be dropped during or after the handshake. The +setting is a list of allowed protocol versions: +`SSLv3`, `TLSv1` for TLS version 1.0, `TLSv1.0`, `TLSv1.1`, `TLSv1.2`, and +`TLSv1.3`. + +The default value is `[TLSv1.1, TLSv1.2, TLSv1.3]`. + +[float] +[[cipher-suites]] +==== `cipher_suites` + +The list of cipher suites to use. The first entry has the highest priority. +If this option is omitted, the Go crypto library's https://golang.org/pkg/crypto/tls/[default suites] +are used (recommended). Note that TLS 1.3 cipher suites are not +individually configurable in Go, so they are not included in this list. + +// tag::cipher_suites[] +The following cipher suites are available: + +[options="header"] +|=== +| Cypher | Notes +| ECDHE-ECDSA-AES-128-CBC-SHA | +| ECDHE-ECDSA-AES-128-CBC-SHA256 | TLS 1.2 only. Disabled by default. +| ECDHE-ECDSA-AES-128-GCM-SHA256 | TLS 1.2 only. +| ECDHE-ECDSA-AES-256-CBC-SHA | +| ECDHE-ECDSA-AES-256-GCM-SHA384 | TLS 1.2 only. +| ECDHE-ECDSA-CHACHA20-POLY1305 | TLS 1.2 only. +| ECDHE-ECDSA-RC4-128-SHA | Disabled by default. RC4 not recommended. +| ECDHE-RSA-3DES-CBC3-SHA | +| ECDHE-RSA-AES-128-CBC-SHA | +| ECDHE-RSA-AES-128-CBC-SHA256 | TLS 1.2 only. Disabled by default. +| ECDHE-RSA-AES-128-GCM-SHA256 | TLS 1.2 only. +| ECDHE-RSA-AES-256-CBC-SHA | +| ECDHE-RSA-AES-256-GCM-SHA384 | TLS 1.2 only. +| ECDHE-RSA-CHACHA20-POLY1205 | TLS 1.2 only. +| ECDHE-RSA-RC4-128-SHA | Disabled by default. RC4 not recommended. +| RSA-3DES-CBC3-SHA | +| RSA-AES-128-CBC-SHA | +| RSA-AES-128-CBC-SHA256 | TLS 1.2 only. Disabled by default. +| RSA-AES-128-GCM-SHA256 | TLS 1.2 only. +| RSA-AES-256-CBC-SHA | +| RSA-AES-256-GCM-SHA384 | TLS 1.2 only. +| RSA-RC4-128-SHA | Disabled by default. RC4 not recommended. +|=== + +Here is a list of acronyms used in defining the cipher suites: + +* 3DES: + Cipher suites using triple DES + +* AES-128/256: + Cipher suites using AES with 128/256-bit keys. + +* CBC: + Cipher using Cipher Block Chaining as block cipher mode. + +* ECDHE: + Cipher suites using Elliptic Curve Diffie-Hellman (DH) ephemeral key exchange. + +* ECDSA: + Cipher suites using Elliptic Curve Digital Signature Algorithm for authentication. + +* GCM: + Galois/Counter mode is used for symmetric key cryptography. + +* RC4: + Cipher suites using RC4. + +* RSA: + Cipher suites using RSA. + +* SHA, SHA256, SHA384: + Cipher suites using SHA-1, SHA-256 or SHA-384. +// end::cipher_suites[] + +[float] +[[curve-types]] +==== `curve_types` + +The list of curve types for ECDHE (Elliptic Curve Diffie-Hellman ephemeral key exchange). + +The following elliptic curve types are available: + +* P-256 +* P-384 +* P-521 +* X25519 + +[float] +[[ca-sha256]] +==== `ca_sha256` + +This configures a certificate pin that you can use to ensure that a specific certificate is part of the verified chain. 
+ +The pin is a base64 encoded string of the SHA-256 of the certificate. + +NOTE: This check is not a replacement for the normal SSL validation, but it adds additional validation. +If this option is used with `verification_mode` set to `none`, the check will always fail because +it will not receive any verified chains. + +[discrete] +[[ssl-client-config]] +=== Client configuration options + +You can specify the following options in the `ssl` section of each subsystem that +supports SSL. + +[float] +[[client-certificate-authorities]] ==== `certificate_authorities` -The list of root certificates for server verifications. If `certificate_authorities` is empty or not set, the trusted certificate authorities of the host system are used. If `certificate_authorities` is self-signed, the host system needs to trust that CA cert as well. -By default you can specify a list of files that +{beatname_lc} will read, but you can also embed a certificate directly in the `YAML` configuration: +The list of root certificates for verifications is required. If `certificate_authorities` is empty or not set, the +system keystore is used. If `certificate_authorities` is self-signed, the host system +needs to trust that CA cert as well. + +By default you can specify a list of files that +{beatname_lc}+ will read, but you +can also embed a certificate directly in the `YAML` configuration: [source,yaml] ---- @@ -133,16 +263,17 @@ certificate_authorities: ---- [float] -[[certificate]] +[[client-certificate]] ==== `certificate: "/etc/pki/client/cert.pem"` -The path to the certificate for SSL client authentication. If the certificate +The path to the certificate for SSL client authentication is only required if +`client_authentication` is specified. If the certificate is not specified, client authentication is not available. The connection might fail if the server requests client authentication. If the SSL server does not require client authentication, the certificate will be loaded, but not requested or used by the server. -When this option is configured, the <> option is also required. +When this option is configured, the <> option is also required. The certificate option support embedding of the certificate: [source,yaml] @@ -169,13 +300,12 @@ certificate: | -----END CERTIFICATE----- ---- - [float] -[[key]] +[[client-key]] ==== `key: "/etc/pki/client/cert.key"` -The client certificate key used for client authentication. This option is required if <> is specified. -The key option support embedding of the private key: +The client certificate key used for client authentication and is only required +if `client_authentication` is configured. The key option support embedding of the private key: [source,yaml] ---- @@ -211,157 +341,230 @@ key: | ---- [float] +[[client-key-passphrase]] ==== `key_passphrase` The passphrase used to decrypt an encrypted key stored in the configured `key` file. -[float] -==== `supported_protocols` - -List of allowed SSL/TLS versions. If SSL/TLS server decides for protocol versions -not configured, the connection will be dropped during or after the handshake. The -setting is a list of allowed protocol versions: -`SSLv3`, `TLSv1` for TLS version 1.0, `TLSv1.0`, `TLSv1.1`, `TLSv1.2`, and -`TLSv1.3`. - -The default value is `[TLSv1.1, TLSv1.2, TLSv1.3]`. [float] +[[client-verification-mode]] ==== `verification_mode` -Controls the verification of certificates. Valid values are: +Controls the verification of server certificates. 
Valid values are: - * `full`, which verifies that the provided certificate is signed by a trusted +`full`:: +Verifies that the provided certificate is signed by a trusted authority (CA) and also verifies that the server's hostname (or IP address) matches the names identified within the certificate. - * `strict`, which verifies that the provided certificate is signed by a trusted + +`strict`:: +Verifies that the provided certificate is signed by a trusted authority (CA) and also verifies that the server's hostname (or IP address) matches the names identified within the certificate. If the Subject Alternative Name is empty, it returns an error. - * `certificate`, which verifies that the provided certificate is signed by a + +`certificate`:: +Verifies that the provided certificate is signed by a trusted authority (CA), but does not perform any hostname verification. - * `none`, which performs _no verification_ of the server's certificate. This + +`none`:: +Performs _no verification_ of the server's certificate. This mode disables many of the security benefits of SSL/TLS and should only be used -after very careful consideration. It is primarily intended as a temporary +after cautious consideration. It is primarily intended as a temporary diagnostic mechanism when attempting to resolve TLS errors; its use in production environments is strongly discouraged. - ++ The default value is `full`. -[float] -==== `cipher_suites` +[discrete] +[[ssl-server-config]] +=== Server configuration options -The list of cipher suites to use. The first entry has the highest priority. -If this option is omitted, the Go crypto library's https://golang.org/pkg/crypto/tls/[default suites] -are used (recommended). Note that TLS 1.3 cipher suites are not -individually configurable in Go, so they are not included in this list. +You can specify the following options in the `ssl` section of each subsystem that +supports SSL. -// tag::cipher_suites[] -The following cipher suites are available: - -[options="header"] -|=== -| Cypher | Notes -| ECDHE-ECDSA-AES-128-CBC-SHA | -| ECDHE-ECDSA-AES-128-CBC-SHA256 | TLS 1.2 only. Disabled by default. -| ECDHE-ECDSA-AES-128-GCM-SHA256 | TLS 1.2 only. -| ECDHE-ECDSA-AES-256-CBC-SHA | -| ECDHE-ECDSA-AES-256-GCM-SHA384 | TLS 1.2 only. -| ECDHE-ECDSA-CHACHA20-POLY1305 | TLS 1.2 only. -| ECDHE-ECDSA-RC4-128-SHA | Disabled by default. RC4 not recommended. -| ECDHE-RSA-3DES-CBC3-SHA | -| ECDHE-RSA-AES-128-CBC-SHA | -| ECDHE-RSA-AES-128-CBC-SHA256 | TLS 1.2 only. Disabled by default. -| ECDHE-RSA-AES-128-GCM-SHA256 | TLS 1.2 only. -| ECDHE-RSA-AES-256-CBC-SHA | -| ECDHE-RSA-AES-256-GCM-SHA384 | TLS 1.2 only. -| ECDHE-RSA-CHACHA20-POLY1205 | TLS 1.2 only. -| ECDHE-RSA-RC4-128-SHA | Disabled by default. RC4 not recommended. -| RSA-3DES-CBC3-SHA | -| RSA-AES-128-CBC-SHA | -| RSA-AES-128-CBC-SHA256 | TLS 1.2 only. Disabled by default. -| RSA-AES-128-GCM-SHA256 | TLS 1.2 only. -| RSA-AES-256-CBC-SHA | -| RSA-AES-256-GCM-SHA384 | TLS 1.2 only. -| RSA-RC4-128-SHA | Disabled by default. RC4 not recommended. -|=== +[float] +[[server-certificate-authorities]] +==== `certificate_authorities` -Here is a list of acronyms used in defining the cipher suites: +The list of root certificates for client verifications is only required if +`client_authentication` is configured. If `certificate_authorities` is empty or not set, and +`client_authentication` is configured, the system keystore is used. 
-* 3DES: - Cipher suites using triple DES +If `certificate_authorities` is self-signed, the host system needs to trust that CA cert as well. +By default you can specify a list of files that +{beatname_lc}+ will read, but you can also embed a certificate +directly in the `YAML` configuration: -* AES-128/256: - Cipher suites using AES with 128/256-bit keys. +[source,yaml] +---- +certificate_authorities: + - | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF + ADAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwIBcNMTkwNzIyMTkyOTA0WhgPMjExOTA2 + MjgxOTI5MDRaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB + BQADggEPADCCAQoCggEBANce58Y/JykI58iyOXpxGfw0/gMvF0hUQAcUrSMxEO6n + fZRA49b4OV4SwWmA3395uL2eB2NB8y8qdQ9muXUdPBWE4l9rMZ6gmfu90N5B5uEl + 94NcfBfYOKi1fJQ9i7WKhTjlRkMCgBkWPkUokvBZFRt8RtF7zI77BSEorHGQCk9t + /D7BS0GJyfVEhftbWcFEAG3VRcoMhF7kUzYwp+qESoriFRYLeDWv68ZOvG7eoWnP + PsvZStEVEimjvK5NSESEQa9xWyJOmlOKXhkdymtcUd/nXnx6UTCFgnkgzSdTWV41 + CI6B6aJ9svCTI2QuoIq2HxX/ix7OvW1huVmcyHVxyUECAwEAAaNTMFEwHQYDVR0O + BBYEFPwN1OceFGm9v6ux8G+DZ3TUDYxqMB8GA1UdIwQYMBaAFPwN1OceFGm9v6ux + 8G+DZ3TUDYxqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG5D + 874A4YI7YUwOVsVAdbWtgp1d0zKcPRR+r2OdSbTAV5/gcS3jgBJ3i1BN34JuDVFw + 3DeJSYT3nxy2Y56lLnxDeF8CUTUtVQx3CuGkRg1ouGAHpO/6OqOhwLLorEmxi7tA + H2O8mtT0poX5AnOAhzVy7QW0D/k4WaoLyckM5hUa6RtvgvLxOwA0U+VGurCDoctu + 8F4QOgTAWyh8EZIwaKCliFRSynDpv3JTUwtfZkxo6K6nce1RhCWFAsMvDZL8Dgc0 + yvgJ38BRsFOtkRuAGSf6ZUwTO8JJRRIFnpUzXflAnGivK9M13D5GEQMmIl6U9Pvk + sxSmbIUfc2SGJGCJD4I= + -----END CERTIFICATE----- +---- -* CBC: - Cipher using Cipher Block Chaining as block cipher mode. +[float] +[[server-certificate]] +==== `certificate: "/etc/pki/server/cert.pem"` -* ECDHE: - Cipher suites using Elliptic Curve Diffie-Hellman (DH) ephemeral key exchange. +For server authentication, the path to the SSL authentication certificate must +be specified for TLS. If the certificate is not specified, startup will fail. -* ECDSA: - Cipher suites using Elliptic Curve Digital Signature Algorithm for authentication. +When this option is configured, the <> option is also required. +The certificate option support embedding of the certificate: -* GCM: - Galois/Counter mode is used for symmetric key cryptography. +[source,yaml] +---- +certificate: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF + ADAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwIBcNMTkwNzIyMTkyOTA0WhgPMjExOTA2 + MjgxOTI5MDRaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB + BQADggEPADCCAQoCggEBANce58Y/JykI58iyOXpxGfw0/gMvF0hUQAcUrSMxEO6n + fZRA49b4OV4SwWmA3395uL2eB2NB8y8qdQ9muXUdPBWE4l9rMZ6gmfu90N5B5uEl + 94NcfBfYOKi1fJQ9i7WKhTjlRkMCgBkWPkUokvBZFRt8RtF7zI77BSEorHGQCk9t + /D7BS0GJyfVEhftbWcFEAG3VRcoMhF7kUzYwp+qESoriFRYLeDWv68ZOvG7eoWnP + PsvZStEVEimjvK5NSESEQa9xWyJOmlOKXhkdymtcUd/nXnx6UTCFgnkgzSdTWV41 + CI6B6aJ9svCTI2QuoIq2HxX/ix7OvW1huVmcyHVxyUECAwEAAaNTMFEwHQYDVR0O + BBYEFPwN1OceFGm9v6ux8G+DZ3TUDYxqMB8GA1UdIwQYMBaAFPwN1OceFGm9v6ux + 8G+DZ3TUDYxqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG5D + 874A4YI7YUwOVsVAdbWtgp1d0zKcPRR+r2OdSbTAV5/gcS3jgBJ3i1BN34JuDVFw + 3DeJSYT3nxy2Y56lLnxDeF8CUTUtVQx3CuGkRg1ouGAHpO/6OqOhwLLorEmxi7tA + H2O8mtT0poX5AnOAhzVy7QW0D/k4WaoLyckM5hUa6RtvgvLxOwA0U+VGurCDoctu + 8F4QOgTAWyh8EZIwaKCliFRSynDpv3JTUwtfZkxo6K6nce1RhCWFAsMvDZL8Dgc0 + yvgJ38BRsFOtkRuAGSf6ZUwTO8JJRRIFnpUzXflAnGivK9M13D5GEQMmIl6U9Pvk + sxSmbIUfc2SGJGCJD4I= + -----END CERTIFICATE----- +---- -* RC4: - Cipher suites using RC4. 
+[float] +[[server-key]] +==== `key: "/etc/pki/server/cert.key"` -* RSA: - Cipher suites using RSA. +The server certificate key used for authentication is required. +The key option support embedding of the private key: -* SHA, SHA256, SHA384: - Cipher suites using SHA-1, SHA-256 or SHA-384. -// end::cipher_suites[] +[source,yaml] +---- +key: | + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDXHufGPycpCOfI + sjl6cRn8NP4DLxdIVEAHFK0jMRDup32UQOPW+DleEsFpgN9/ebi9ngdjQfMvKnUP + Zrl1HTwVhOJfazGeoJn7vdDeQebhJfeDXHwX2DiotXyUPYu1ioU45UZDAoAZFj5F + KJLwWRUbfEbRe8yO+wUhKKxxkApPbfw+wUtBicn1RIX7W1nBRABt1UXKDIRe5FM2 + MKfqhEqK4hUWC3g1r+vGTrxu3qFpzz7L2UrRFRIpo7yuTUhEhEGvcVsiTppTil4Z + HcprXFHf5158elEwhYJ5IM0nU1leNQiOgemifbLwkyNkLqCKth8V/4sezr1tYblZ + nMh1cclBAgMBAAECggEBAKdP5jyOicqknoG9/G564RcDsDyRt64NuO7I6hBg7SZx + Jn7UKWDdFuFP/RYtoabn6QOxkVVlydp5Typ3Xu7zmfOyss479Q/HIXxmmbkD0Kp0 + eRm2KN3y0b6FySsS40KDRjKGQCuGGlNotW3crMw6vOvvsLTlcKgUHF054UVCHoK/ + Piz7igkDU7NjvJeha53vXL4hIjb10UtJNaGPxIyFLYRZdRPyyBJX7Yt3w8dgz8WM + epOPu0dq3bUrY3WQXcxKZo6sQjE1h7kdl4TNji5jaFlvD01Y8LnyG0oThOzf0tve + Gaw+kuy17gTGZGMIfGVcdeb+SlioXMAAfOps+mNIwTECgYEA/gTO8W0hgYpOQJzn + BpWkic3LAoBXWNpvsQkkC3uba8Fcps7iiEzotXGfwYcb5Ewf5O3Lrz1EwLj7GTW8 + VNhB3gb7bGOvuwI/6vYk2/dwo84bwW9qRWP5hqPhNZ2AWl8kxmZgHns6WTTxpkRU + zrfZ5eUrBDWjRU2R8uppgRImsxMCgYEA2MxuL/C/Ko0d7XsSX1kM4JHJiGpQDvb5 + GUrlKjP/qVyUysNF92B9xAZZHxxfPWpdfGGBynhw7X6s+YeIoxTzFPZVV9hlkpAA + 5igma0n8ZpZEqzttjVdpOQZK8o/Oni/Q2S10WGftQOOGw5Is8+LY30XnLvHBJhO7 + TKMurJ4KCNsCgYAe5TDSVmaj3dGEtFC5EUxQ4nHVnQyCpxa8npL+vor5wSvmsfUF + hO0s3GQE4sz2qHecnXuPldEd66HGwC1m2GKygYDk/v7prO1fQ47aHi9aDQB9N3Li + e7Vmtdn3bm+lDjtn0h3Qt0YygWj+wwLZnazn9EaWHXv9OuEMfYxVgYKpdwKBgEze + Zy8+WDm5IWRjn8cI5wT1DBT/RPWZYgcyxABrwXmGZwdhp3wnzU/kxFLAl5BKF22T + kRZ+D+RVZvVutebE9c937BiilJkb0AXLNJwT9pdVLnHcN2LHHHronUhV7vetkop+ + kGMMLlY0lkLfoGq1AxpfSbIea9KZam6o6VKxEnPDAoGAFDCJm+ZtsJK9nE5GEMav + NHy+PwkYsHhbrPl4dgStTNXLenJLIJ+Ke0Pcld4ZPfYdSyu/Tv4rNswZBNpNsW9K + 0NwJlyMBfayoPNcJKXrH/csJY7hbKviAHr1eYy9/8OL0dHf85FV+9uY5YndLcsDc + nygO9KTJuUiBrLr0AHEnqko= + -----END PRIVATE KEY----- +---- [float] -==== `curve_types` +[[server-key-passphrase]] +==== `key_passphrase` -The list of curve types for ECDHE (Elliptic Curve Diffie-Hellman ephemeral key exchange). +The passphrase is used to decrypt an encrypted key stored in the configured `key` file. -The following elliptic curve types are available: +[float] +[[server-verification-mode]] +==== `verification_mode` -* P-256 -* P-384 -* P-521 -* X25519 +Controls the verification of client certificates. Valid values are: -[float] -==== `renegotiation` +`full`:: +Verifies that the provided certificate is signed by a trusted +authority (CA) and also verifies that the server's hostname (or IP address) +matches the names identified within the certificate. -This configures what types of TLS renegotiation are supported. The valid options -are `never`, `once`, and `freely`. The default value is never. +`strict`:: +Verifies that the provided certificate is signed by a trusted +authority (CA) and also verifies that the server's hostname (or IP address) +matches the names identified within the certificate. If the Subject Alternative +Name is empty, it returns an error. -* `never` - Disables renegotiation. -* `once` - Allows a remote server to request renegotiation once per connection. -* `freely` - Allows a remote server to repeatedly request renegotiation. 
+`certificate`:: +Verifies that the provided certificate is signed by a +trusted authority (CA), but does not perform any hostname verification. +`none`:: +Performs _no verification_ of the server's certificate. This +mode disables many of the security benefits of SSL/TLS and should only be used +after cautious consideration. It is primarily intended as a temporary +diagnostic mechanism when attempting to resolve TLS errors; its use in +production environments is strongly discouraged. ++ +The default value is `full`. [float] -==== `ca_sha256` +[[server-renegotiation]] +==== `renegotiation` -This configures a certificate pin that you can use to ensure that a specific certificate is part of the verified chain. +This configures what types of TLS renegotiation are supported. The valid options +are: -The pin is a base64 encoded string of the SHA-256 of the certificate. +`never`:: +Disables renegotiation. -NOTE: This check is not a replacement for the normal SSL validation, but it adds additional validation. -If this option is used with `verification_mode` set to `none`, the check will always fail because -it will not receive any verified chains. +`once`:: +Allows a remote server to request renegotiation once per connection. +`freely`:: +Allows a remote server to request renegotiation repeatedly. ++ +The default value is `never`. ifeval::["{beatname_lc}" == "filebeat"] [float] +[[server-client-renegotiation]] ==== `client_authentication` -This configures what types of client authentication are supported. The valid options -are `none`, `optional`, and `required`. When `certificate_authorities` is set it will -default to `required` otherwise it will be set to `none`. +The type of client authentication mode. When `certificate_authorities` is set, it +defaults to `required`. Otherwise, it defaults to `none`. + +The valid options are: + +`none`:: +Disables client authentication. -NOTE: This option is only valid with the TCP or the Syslog input. +`optional`:: +When a client certificate is supplied, the server will verify it. -* `none` - Disables client authentication. -* `optional` - When a client certificate is given, the server will verify it. -* `required` - Will require clients to provide a valid certificate. +`required`:: +Will require clients to provide a valid certificate. 
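+
+As a sketch only (paths and values are illustrative, not part of any shipped
+configuration), a server-side `ssl` section that requires client certificates
+could combine the options above like this:
+
+[source,yaml]
+----
+ssl:
+  certificate: "/etc/pki/server/cert.pem"
+  key: "/etc/pki/server/cert.key"
+  certificate_authorities: ["/etc/pki/server/ca.pem"]
+  client_authentication: "required"
+  verification_mode: "certificate"
+----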
endif::[] From f323b363dcd73322907588b26bd8ef359183f32a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Thu, 11 Mar 2021 10:14:22 +0100 Subject: [PATCH 4/8] Port four Harvester tests of log input to filestream in Golang (#24250) --- .../filestream/input_integration_test.go | 109 +++++++++++++++++- libbeat/reader/readfile/line_test.go | 35 ++++++ 2 files changed, 140 insertions(+), 4 deletions(-) diff --git a/filebeat/input/filestream/input_integration_test.go b/filebeat/input/filestream/input_integration_test.go index e3c08b434ac..fecceb38789 100644 --- a/filebeat/input/filestream/input_integration_test.go +++ b/filebeat/input/filestream/input_integration_test.go @@ -21,8 +21,11 @@ package filestream import ( "context" + "os" "runtime" "testing" + + loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" ) // test_close_renamed from test_harvester.py @@ -57,7 +60,6 @@ func TestFilestreamCloseRenamed(t *testing.T) { newerTestlines := []byte("new first log line\nnew second log line\n") env.mustWriteLinesToFile(testlogName, newerTestlines) - // new two events arrived env.waitUntilEventCount(3) cancelInput() @@ -67,6 +69,46 @@ func TestFilestreamCloseRenamed(t *testing.T) { env.requireOffsetInRegistry(testlogName, len(newerTestlines)) } +// test_close_removed from test_harvester.py +func TestFilestreamCloseRemoved(t *testing.T) { + env := newInputTestingEnvironment(t) + + testlogName := "test.log" + inp := env.mustCreateInput(map[string]interface{}{ + "paths": []string{env.abspath(testlogName) + "*"}, + "prospector.scanner.check_interval": "24h", + "close.on_state_change.check_interval": "1ms", + "close.on_state_change.removed": "true", + }) + + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + + testlines := []byte("first log line\n") + env.mustWriteLinesToFile(testlogName, testlines) + + // first event has made it successfully + env.waitUntilEventCount(1) + + env.requireOffsetInRegistry(testlogName, len(testlines)) + + fi, err := os.Stat(env.abspath(testlogName)) + if err != nil { + t.Fatalf("cannot stat file: %+v", err) + } + + env.mustRemoveFile(testlogName) + + env.waitUntilHarvesterIsDone() + + cancelInput() + env.waitUntilInputStops() + + identifier, _ := newINodeDeviceIdentifier(nil) + src := identifier.GetSource(loginp.FSEvent{Info: fi, Op: loginp.OpCreate, NewPath: env.abspath(testlogName)}) + env.requireOffsetInRegistryByID(src.Name(), len(testlines)) +} + // test_close_eof from test_harvester.py func TestFilestreamCloseEOF(t *testing.T) { env := newInputTestingEnvironment(t) @@ -78,13 +120,13 @@ func TestFilestreamCloseEOF(t *testing.T) { "close.reader.on_eof": "true", }) - ctx, cancelInput := context.WithCancel(context.Background()) - env.startInput(ctx, inp) - testlines := []byte("first log line\n") expectedOffset := len(testlines) env.mustWriteLinesToFile(testlogName, testlines) + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + // first event has made it successfully env.waitUntilEventCount(1) env.requireOffsetInRegistry(testlogName, expectedOffset) @@ -100,3 +142,62 @@ func TestFilestreamCloseEOF(t *testing.T) { env.requireOffsetInRegistry(testlogName, expectedOffset) } + +// test_empty_lines from test_harvester.py +func TestFilestreamEmptyLine(t *testing.T) { + env := newInputTestingEnvironment(t) + + testlogName := "test.log" + inp := env.mustCreateInput(map[string]interface{}{ + "paths": []string{env.abspath(testlogName)}, + 
"prospector.scanner.check_interval": "1ms", + }) + + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + + testlines := []byte("first log line\nnext is an empty line\n") + env.mustWriteLinesToFile(testlogName, testlines) + + env.waitUntilEventCount(2) + env.requireOffsetInRegistry(testlogName, len(testlines)) + + moreTestlines := []byte("\nafter an empty line\n") + env.mustAppendLinesToFile(testlogName, moreTestlines) + + env.waitUntilEventCount(3) + env.requireEventsReceived([]string{ + "first log line", + "next is an empty line", + "after an empty line", + }) + + cancelInput() + env.waitUntilInputStops() + + env.requireOffsetInRegistry(testlogName, len(testlines)+len(moreTestlines)) +} + +// test_empty_lines_only from test_harvester.py +// This test differs from the original because in filestream +// input offset is no longer persisted when the line is empty. +func TestFilestreamEmptyLinesOnly(t *testing.T) { + env := newInputTestingEnvironment(t) + + testlogName := "test.log" + inp := env.mustCreateInput(map[string]interface{}{ + "paths": []string{env.abspath(testlogName)}, + "prospector.scanner.check_interval": "1ms", + }) + + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + + testlines := []byte("\n\n\n") + env.mustWriteLinesToFile(testlogName, testlines) + + cancelInput() + env.waitUntilInputStops() + + env.requireNoEntryInRegistry(testlogName) +} diff --git a/libbeat/reader/readfile/line_test.go b/libbeat/reader/readfile/line_test.go index d91544162c5..df28f3345b9 100644 --- a/libbeat/reader/readfile/line_test.go +++ b/libbeat/reader/readfile/line_test.go @@ -30,6 +30,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/text/transform" "github.com/elastic/beats/v7/libbeat/reader/readfile/encoding" @@ -390,3 +391,37 @@ func TestMaxBytesLimit(t *testing.T) { } } } + +// test_exceed_buffer from test_harvester.py +func TestBufferSize(t *testing.T) { + lines := []string{ + "first line is too long\n", + "second line is too long\n", + "third line too long\n", + "OK\n", + } + + codecFactory, _ := encoding.FindEncoding("") + codec, _ := codecFactory(bytes.NewBuffer(nil)) + bufferSize := 10 + + in := ioutil.NopCloser(strings.NewReader(strings.Join(lines, ""))) + reader, err := NewLineReader(in, Config{codec, bufferSize, AutoLineTerminator, 1024}) + if err != nil { + t.Fatal("failed to initialize reader:", err) + } + + for i := 0; i < len(lines); i++ { + b, n, err := reader.Next() + if err != nil { + if err == io.EOF { + break + } else { + t.Fatal("unexpected error:", err) + } + } + + require.Equal(t, n, len(lines[i])) + require.Equal(t, string(b[:n]), lines[i]) + } +} From aabb1676ed1bd355acc1cc159ad5de289b21be08 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 11 Mar 2021 14:57:36 +0100 Subject: [PATCH 5/8] [Ingest Manager] Sync on rename on windows (#24504) --- .../install/atomic/atomic_installer.go | 18 +++++++++--------- .../pkg/artifact/install/zip/zip_installer.go | 11 +++++++++++ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go b/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go index 60c5f1d28cf..a9406cf9815 100644 --- a/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go +++ b/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go @@ -55,15 +55,8 @@ func (i *Installer) Install(ctx context.Context, spec 
program.Spec, version, ins // on windows rename is not atomic, let's force it to flush the cache defer func() { if runtime.GOOS == "windows" { - if f, err := os.OpenFile(installDir, os.O_RDWR, 0777); err == nil { - f.Sync() - f.Close() - } - - if f, err := os.OpenFile(tempInstallDir, os.O_RDWR, 0777); err == nil { - f.Sync() - f.Close() - } + syncDir(installDir) + syncDir(tempInstallDir) } }() @@ -87,3 +80,10 @@ func (i *Installer) Install(ctx context.Context, spec program.Spec, version, ins return nil } + +func syncDir(dir string) { + if f, err := os.OpenFile(dir, os.O_RDWR, 0777); err == nil { + f.Sync() + f.Close() + } +} diff --git a/x-pack/elastic-agent/pkg/artifact/install/zip/zip_installer.go b/x-pack/elastic-agent/pkg/artifact/install/zip/zip_installer.go index 034e4be7b41..eba432feefb 100644 --- a/x-pack/elastic-agent/pkg/artifact/install/zip/zip_installer.go +++ b/x-pack/elastic-agent/pkg/artifact/install/zip/zip_installer.go @@ -61,9 +61,13 @@ func (i *Installer) Install(ctx context.Context, spec program.Spec, version, ins // if root directory is not the same as desired directory rename // e.g contains `-windows-` or `-SNAPSHOT-` if rootDir != installDir { + defer syncDir(rootDir) + defer syncDir(installDir) + if err := os.Rename(rootDir, installDir); err != nil { return errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, installDir)) } + } return nil @@ -155,3 +159,10 @@ func (i *Installer) getRootDir(zipPath string) (dir string, err error) { return rootDir, nil } + +func syncDir(dir string) { + if f, err := os.OpenFile(dir, os.O_RDWR, 0777); err == nil { + f.Sync() + f.Close() + } +} From 1db96a39bd83da4941d043cebc397c91e6570ac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Thu, 11 Mar 2021 15:16:37 +0100 Subject: [PATCH 6/8] Add tests for encoding settings of filestream input (#24426) --- filebeat/input/filestream/config.go | 4 +- filebeat/input/filestream/input.go | 37 ++++----- .../filestream/input_integration_test.go | 78 +++++++++++++++++++ 3 files changed, 93 insertions(+), 26 deletions(-) diff --git a/filebeat/input/filestream/config.go b/filebeat/input/filestream/config.go index 5b582ccf6e8..e199a6e6870 100644 --- a/filebeat/input/filestream/config.go +++ b/filebeat/input/filestream/config.go @@ -30,7 +30,7 @@ import ( // Config stores the options of a file stream. type config struct { - readerConfig + Reader readerConfig `config:",inline"` Paths []string `config:"paths"` Close closerConfig `config:"close"` @@ -79,7 +79,7 @@ type backoffConfig struct { func defaultConfig() config { return config{ - readerConfig: defaultReaderConfig(), + Reader: defaultReaderConfig(), Paths: []string{}, Close: defaultCloserConfig(), CleanInactive: 0, diff --git a/filebeat/input/filestream/input.go b/filebeat/input/filestream/input.go index 8f21cb505ce..9c23e18473a 100644 --- a/filebeat/input/filestream/input.go +++ b/filebeat/input/filestream/input.go @@ -53,14 +53,8 @@ type fileMeta struct { // are actively written by other applications. 
type filestream struct { readerConfig readerConfig - bufferSize int - tailFile bool // TODO encodingFactory encoding.EncodingFactory encoding encoding.Encoding - lineTerminator readfile.LineTerminator - excludeLines []match.Matcher - includeLines []match.Matcher - maxBytes int closerConfig closerConfig } @@ -97,9 +91,9 @@ func configure(cfg *common.Config) (loginp.Prospector, loginp.Harvester, error) return nil, nil, fmt.Errorf("error while creating file identifier: %v", err) } - encodingFactory, ok := encoding.FindEncoding(config.Encoding) + encodingFactory, ok := encoding.FindEncoding(config.Reader.Encoding) if !ok || encodingFactory == nil { - return nil, nil, fmt.Errorf("unknown encoding('%v')", config.Encoding) + return nil, nil, fmt.Errorf("unknown encoding('%v')", config.Reader.Encoding) } prospector := &fileProspector{ @@ -111,13 +105,8 @@ func configure(cfg *common.Config) (loginp.Prospector, loginp.Harvester, error) } filestream := &filestream{ - readerConfig: config.readerConfig, - bufferSize: config.BufferSize, + readerConfig: config.Reader, encodingFactory: encodingFactory, - lineTerminator: config.LineTerminator, - excludeLines: config.ExcludeLines, - includeLines: config.IncludeLines, - maxBytes: config.MaxBytes, closerConfig: config.Close, } @@ -191,7 +180,7 @@ func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, path stri return nil, err } - log.Debug("newLogFileReader with config.MaxBytes:", inp.maxBytes) + log.Debug("newLogFileReader with config.MaxBytes:", inp.readerConfig.MaxBytes) // TODO: NewLineReader uses additional buffering to deal with encoding and testing // for new lines in input stream. Simple 8-bit based encodings, or plain @@ -211,13 +200,13 @@ func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, path stri // for the worst case scenario where incoming UTF32 charchers are decoded to the single byte UTF-8 characters. // This limit serves primarily to avoid memory bload or potential OOM with expectedly long lines in the file. // The further size limiting is performed by LimitReader at the end of the readers pipeline as needed. - encReaderMaxBytes := inp.maxBytes * 4 + encReaderMaxBytes := inp.readerConfig.MaxBytes * 4 var r reader.Reader r, err = readfile.NewEncodeReader(dbgReader, readfile.Config{ Codec: inp.encoding, - BufferSize: inp.bufferSize, - Terminator: inp.lineTerminator, + BufferSize: inp.readerConfig.BufferSize, + Terminator: inp.readerConfig.LineTerminator, MaxBytes: encReaderMaxBytes, }) if err != nil { @@ -225,8 +214,8 @@ func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, path stri return nil, err } - r = readfile.NewStripNewline(r, inp.lineTerminator) - r = readfile.NewLimitReader(r, inp.maxBytes) + r = readfile.NewStripNewline(r, inp.readerConfig.LineTerminator) + r = readfile.NewLimitReader(r, inp.readerConfig.MaxBytes) return r, nil } @@ -335,14 +324,14 @@ func (inp *filestream) readFromSource( // isDroppedLine decides if the line is exported or not based on // the include_lines and exclude_lines options. 
func (inp *filestream) isDroppedLine(log *logp.Logger, line string) bool { - if len(inp.includeLines) > 0 { - if !matchAny(inp.includeLines, line) { + if len(inp.readerConfig.IncludeLines) > 0 { + if !matchAny(inp.readerConfig.IncludeLines, line) { log.Debug("Drop line as it does not match any of the include patterns %s", line) return true } } - if len(inp.excludeLines) > 0 { - if matchAny(inp.excludeLines, line) { + if len(inp.readerConfig.ExcludeLines) > 0 { + if matchAny(inp.readerConfig.ExcludeLines, line) { log.Debug("Drop line as it does match one of the exclude patterns%s", line) return true } diff --git a/filebeat/input/filestream/input_integration_test.go b/filebeat/input/filestream/input_integration_test.go index fecceb38789..89c0c9d792b 100644 --- a/filebeat/input/filestream/input_integration_test.go +++ b/filebeat/input/filestream/input_integration_test.go @@ -20,11 +20,17 @@ package filestream import ( + "bytes" "context" "os" "runtime" "testing" + "github.com/stretchr/testify/require" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" + loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" ) @@ -201,3 +207,75 @@ func TestFilestreamEmptyLinesOnly(t *testing.T) { env.requireNoEntryInRegistry(testlogName) } + +// test_bom_utf8 from test_harvester.py +func TestFilestreamBOMUTF8(t *testing.T) { + env := newInputTestingEnvironment(t) + + testlogName := "test.log" + inp := env.mustCreateInput(map[string]interface{}{ + "paths": []string{env.abspath(testlogName)}, + }) + + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + + // BOM: 0xEF,0xBB,0xBF + lines := append([]byte{0xEF, 0xBB, 0xBF}, []byte(`#Software: Microsoft Exchange Server +#Version: 14.0.0.0 +#Log-type: Message Tracking Log +#Date: 2016-04-05T00:00:02.052Z +#Fields: date-time,client-ip,client-hostname,server-ip,server-hostname,source-context,connector-id,source,event-id,internal-message-id,message-id,recipient-address,recipient-status,total-bytes,recipient-count,related-recipient-address,reference,message-subject,sender-address,return-path,message-info,directionality,tenant-id,original-client-ip,original-server-ip,custom-data +2016-04-05T00:00:02.052Z,,,,,"MDB:61914740-3f1b-4ddb-94e0-557196870cfa, Mailbox:279f077c-216f-4323-a9ee-48e50ffd3cad, Event:269492708, MessageClass:IPM.Note.StorageQuotaWarning.Warning, CreationTime:2016-04-05T00:00:01.022Z, ClientType:System",,STOREDRIVER,NOTIFYMAPI,,,,,,,,,,,,,,,,,S:ItemEntryId=00-00-00-00-37-DB-F9-F9-B5-F2-42-4F-86-62-E6-5D-FC-0C-A1-41-07-00-0E-D6-03-16-80-DC-8C-44-9D-30-07-23-ED-71-B7-F7-00-00-1F-D4-B5-0E-00-00-2E-EF-F2-59-0E-E8-2D-46-BC-31-02-85-0D-67-98-43-00-00-37-4A-A3-B3-00-00 +2016-04-05T00:00:02.145Z,,,,,"MDB:61914740-3f1b-4ddb-94e0-557196870cfa, Mailbox:49cb09c6-5b76-415d-a085-da0ad9079682, Event:269492711, MessageClass:IPM.Note.StorageQuotaWarning.Warning, CreationTime:2016-04-05T00:00:01.038Z, ClientType:System",,STOREDRIVER,NOTIFYMAPI,,,,,,,,,,,,,,,,,S:ItemEntryId=00-00-00-00-97-8F-07-43-51-44-61-4A-AD-BD-29-D4-97-4E-20-A0-07-00-0E-D6-03-16-80-DC-8C-44-9D-30-07-23-ED-71-B7-F7-00-8E-8F-BD-EB-57-00-00-3D-FB-CE-26-A4-8D-46-4C-A4-35-0F-A7-9B-FA-D7-B9-00-00-37-44-2F-CA-00-00 +`)...) 
+ env.mustWriteLinesToFile(testlogName, lines) + + env.waitUntilEventCount(7) + + cancelInput() + env.waitUntilInputStops() + + messages := env.getOutputMessages() + require.Equal(t, messages[0], "#Software: Microsoft Exchange Server") +} + +// test_boms from test_harvester.py +func TestFilestreamUTF16BOMs(t *testing.T) { + encodings := map[string]encoding.Encoding{ + "utf-16be-bom": unicode.UTF16(unicode.BigEndian, unicode.UseBOM), + "utf-16le-bom": unicode.UTF16(unicode.LittleEndian, unicode.UseBOM), + } + + for name, enc := range encodings { + name := name + encoder := enc.NewEncoder() + t.Run(name, func(t *testing.T) { + env := newInputTestingEnvironment(t) + + testlogName := "test.log" + inp := env.mustCreateInput(map[string]interface{}{ + "paths": []string{env.abspath(testlogName)}, + "encoding": name, + }) + + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + + line := []byte("first line\n") + buf := bytes.NewBuffer(nil) + writer := transform.NewWriter(buf, encoder) + writer.Write(line) + writer.Close() + + env.mustWriteLinesToFile(testlogName, buf.Bytes()) + + env.waitUntilEventCount(1) + + env.requireEventsReceived([]string{"first line"}) + + cancelInput() + env.waitUntilInputStops() + }) + } +} From 89186210a06aae1ae8e43e637902a647677ab203 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 11 Mar 2021 09:53:39 -0500 Subject: [PATCH 7/8] [Elastic Agent] Add the ability to provide custom CA's inside of docker. (#24486) * Add the ability to provide custom CA's for Elastic Agent docker. * Add changelog. --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/application/config.go | 7 +++++- .../pkg/agent/application/enroll_cmd.go | 21 ++++++++-------- .../elastic-agent/pkg/agent/cmd/container.go | 23 ++++++++++++++++++ x-pack/elastic-agent/pkg/agent/cmd/enroll.go | 24 ++++++++++++------- 5 files changed, 57 insertions(+), 19 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 805132d6bdc..302bd0d882b 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -75,3 +75,4 @@ - Add support for enrollment with local bootstrap of Fleet Server {pull}23865[23865] - Add TLS support for Fleet Server {pull}24142[24142] - Add support for Fleet Server running under Elastic Agent {pull}24220[24220] +- Add CA support to Elastic Agent docker image {pull}24486[24486] diff --git a/x-pack/elastic-agent/pkg/agent/application/config.go b/x-pack/elastic-agent/pkg/agent/application/config.go index 6c74b251ca0..cbb24018d30 100644 --- a/x-pack/elastic-agent/pkg/agent/application/config.go +++ b/x-pack/elastic-agent/pkg/agent/application/config.go @@ -28,11 +28,16 @@ func createFleetConfigFromEnroll(accessAPIKey string, kbn *kibana.Config) (*conf return cfg, nil } -func createFleetServerBootstrapConfig(connStr string, policyID string, host string, port uint16, cert string, key string) (*configuration.FleetAgentConfig, error) { +func createFleetServerBootstrapConfig(connStr string, policyID string, host string, port uint16, cert string, key string, esCA string) (*configuration.FleetAgentConfig, error) { es, err := configuration.ElasticsearchFromConnStr(connStr) if err != nil { return nil, err } + if esCA != "" { + es.TLS = &tlscommon.Config{ + CAs: []string{esCA}, + } + } cfg := configuration.DefaultFleetAgentConfig() cfg.Enabled = true cfg.Server = &configuration.FleetServerConfig{ diff --git 
a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go index 7d33d9524b4..ff0b22a1159 100644 --- a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go @@ -81,14 +81,15 @@ type EnrollCmd struct { // EnrollCmdFleetServerOption define all the supported enrollment options for bootstrapping with Fleet Server. type EnrollCmdFleetServerOption struct { - ConnStr string - PolicyID string - Host string - Port uint16 - Cert string - CertKey string - Insecure bool - SpawnAgent bool + ConnStr string + ElasticsearchCA string + PolicyID string + Host string + Port uint16 + Cert string + CertKey string + Insecure bool + SpawnAgent bool } // EnrollCmdOption define all the supported enrollment option. @@ -227,7 +228,7 @@ func (c *EnrollCmd) fleetServerBootstrap(ctx context.Context) error { fleetConfig, err := createFleetServerBootstrapConfig( c.options.FleetServer.ConnStr, c.options.FleetServer.PolicyID, c.options.FleetServer.Host, c.options.FleetServer.Port, - c.options.FleetServer.Cert, c.options.FleetServer.CertKey) + c.options.FleetServer.Cert, c.options.FleetServer.CertKey, c.options.FleetServer.ElasticsearchCA) configToStore := map[string]interface{}{ "fleet": fleetConfig, } @@ -388,7 +389,7 @@ func (c *EnrollCmd) enroll(ctx context.Context) error { serverConfig, err := createFleetServerBootstrapConfig( c.options.FleetServer.ConnStr, c.options.FleetServer.PolicyID, c.options.FleetServer.Host, c.options.FleetServer.Port, - c.options.FleetServer.Cert, c.options.FleetServer.CertKey) + c.options.FleetServer.Cert, c.options.FleetServer.CertKey, c.options.FleetServer.ElasticsearchCA) if err != nil { return err } diff --git a/x-pack/elastic-agent/pkg/agent/cmd/container.go b/x-pack/elastic-agent/pkg/agent/cmd/container.go index cd4b3f7d79e..fdbbaa213a4 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/container.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/container.go @@ -18,6 +18,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/elastic/beats/v7/libbeat/kibana" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" @@ -60,6 +61,7 @@ The following actions are possible and grouped based on the actions. KIBANA_FLEET_HOST - kibana host to enable Fleet on [$KIBANA_HOST] KIBANA_FLEET_USERNAME - kibana username to enable Fleet [$KIBANA_USERNAME] KIBANA_FLEET_PASSWORD - kibana password to enable Fleet [$KIBANA_PASSWORD] + KIBANA_FLEET_CA - path to certificate authority to use with communicate with Kibana [$KIBANA_CA] * Bootstrapping Fleet Server This bootstraps the Fleet Server to be run by this Elastic Agent. At least one Fleet Server is required in a Fleet @@ -69,6 +71,7 @@ The following actions are possible and grouped based on the actions. 
   FLEET_SERVER_ELASTICSEARCH_HOST - elasticsearch host for Fleet Server to communicate with [$ELASTICSEARCH_HOST]
   FLEET_SERVER_ELASTICSEARCH_USERNAME - elasticsearch username for Fleet Server [$ELASTICSEARCH_USERNAME]
   FLEET_SERVER_ELASTICSEARCH_PASSWORD - elasticsearch password for Fleet Server [$ELASTICSEARCH_PASSWORD]
+  FLEET_SERVER_ELASTICSEARCH_CA - path to certificate authority to use when communicating with elasticsearch [$ELASTICSEARCH_CA]
   FLEET_SERVER_POLICY_NAME - name of policy for the Fleet Server to use for itself [$FLEET_TOKEN_POLICY_NAME]
   FLEET_SERVER_POLICY_ID - policy ID for Fleet Server to use for itself ("Default Fleet Server policy" used when undefined)
   FLEET_SERVER_HOST - binding host for Fleet Server HTTP (overrides the policy)
@@ -86,6 +89,7 @@ The following actions are possible and grouped based on the actions.
   FLEET_ENROLLMENT_TOKEN - token to use for enrollment
   FLEET_TOKEN_NAME - token name to use for fetching token from Kibana
   FLEET_TOKEN_POLICY_NAME - token policy name to use for fetching token from Kibana
+  FLEET_CA - path to certificate authority to use when communicating with Fleet Server [$KIBANA_CA]
   FLEET_INSECURE - communicate with Fleet with either insecure HTTP or un-verified HTTPS
   KIBANA_FLEET_HOST - kibana host to enable create enrollment token on [$KIBANA_HOST]
   KIBANA_FLEET_USERNAME - kibana username to create enrollment token [$KIBANA_USERNAME]
@@ -97,9 +101,11 @@ be used when the same credentials will be used across all the possible actions a
   ELASTICSEARCH_HOST - elasticsearch host [http://elasticsearch:9200]
   ELASTICSEARCH_USERNAME - elasticsearch username [elastic]
   ELASTICSEARCH_PASSWORD - elasticsearch password [changeme]
+  ELASTICSEARCH_CA - path to certificate authority to use when communicating with elasticsearch
   KIBANA_HOST - kibana host [http://kibana:5601]
   KIBANA_USERNAME - kibana username [$ELASTICSEARCH_USERNAME]
   KIBANA_PASSWORD - kibana password [$ELASTICSEARCH_PASSWORD]
+  KIBANA_CA - path to certificate authority to use when communicating with Kibana [$ELASTICSEARCH_CA]
 
 By default when this command starts it will check for an existing fleet.yml. If that file already exists then all the
 above actions will be skipped, because the Elastic Agent has already been enrolled. To ensure that enrollment
@@ -199,6 +205,10 @@ func buildEnrollArgs(token string, policyID string) ([]string, error) {
 		if policyID != "" {
 			args = append(args, "--fleet-server-policy", policyID)
 		}
+		ca := envWithDefault("", "FLEET_SERVER_ELASTICSEARCH_CA", "ELASTICSEARCH_CA")
+		if ca != "" {
+			args = append(args, "--fleet-server-elasticsearch-ca", ca)
+		}
 		host := envWithDefault("", "FLEET_SERVER_HOST")
 		if host != "" {
 			args = append(args, "--fleet-server-host", host)
@@ -228,6 +238,10 @@ func buildEnrollArgs(token string, policyID string) ([]string, error) {
 		if envBool("FLEET_INSECURE") {
 			args = append(args, "--insecure")
 		}
+		ca := envWithDefault("", "FLEET_CA", "KIBANA_CA", "ELASTICSEARCH_CA")
+		if ca != "" {
+			args = append(args, "--certificate-authorities", ca)
+		}
 	}
 	args = append(args, "--enrollment-token", token)
 	return args, nil
@@ -291,11 +305,20 @@ func kibanaClient() (*kibana.Client, error) {
 	host := envWithDefault(defaultKibanaHost, "KIBANA_FLEET_HOST", "KIBANA_HOST")
 	username := envWithDefault(defaultUsername, "KIBANA_FLEET_USERNAME", "KIBANA_USERNAME", "ELASTICSEARCH_USERNAME")
 	password := envWithDefault(defaultPassword, "KIBANA_FLEET_PASSWORD", "KIBANA_PASSWORD", "ELASTICSEARCH_PASSWORD")
+
+	var tls *tlscommon.Config
+	ca := envWithDefault("", "KIBANA_FLEET_CA", "KIBANA_CA", "ELASTICSEARCH_CA")
+	if ca != "" {
+		tls = &tlscommon.Config{
+			CAs: []string{ca},
+		}
+	}
 	return kibana.NewClientWithConfig(&kibana.ClientConfig{
 		Host:          host,
 		Username:      username,
 		Password:      password,
 		IgnoreVersion: true,
+		TLS:           tls,
 	})
 }
 
diff --git a/x-pack/elastic-agent/pkg/agent/cmd/enroll.go b/x-pack/elastic-agent/pkg/agent/cmd/enroll.go
index 15c362e9529..30f581331be 100644
--- a/x-pack/elastic-agent/pkg/agent/cmd/enroll.go
+++ b/x-pack/elastic-agent/pkg/agent/cmd/enroll.go
@@ -52,6 +52,7 @@ func addEnrollFlags(cmd *cobra.Command) {
 	cmd.Flags().StringP("kibana-url", "k", "", "URL of Kibana to enroll Agent into Fleet")
 	cmd.Flags().StringP("enrollment-token", "t", "", "Enrollment token to use to enroll Agent into Fleet")
 	cmd.Flags().StringP("fleet-server", "", "", "Start and run a Fleet Server along side this Elastic Agent")
+	cmd.Flags().StringP("fleet-server-elasticsearch-ca", "", "", "Path to certificate authority to use when communicating with elasticsearch")
 	cmd.Flags().StringP("fleet-server-policy", "", "", "Start and run a Fleet Server on this specific policy")
 	cmd.Flags().StringP("fleet-server-host", "", "", "Fleet Server HTTP binding host (overrides the policy)")
 	cmd.Flags().Uint16P("fleet-server-port", "", 0, "Fleet Server HTTP binding port (overrides the policy)")
@@ -75,6 +76,7 @@ func buildEnrollmentFlags(cmd *cobra.Command, url string, token string) []string
 		token, _ = cmd.Flags().GetString("enrollment-token")
 	}
 	fServer, _ := cmd.Flags().GetString("fleet-server")
+	fElasticSearchCA, _ := cmd.Flags().GetString("fleet-server-elasticsearch-ca")
 	fPolicy, _ := cmd.Flags().GetString("fleet-server-policy")
 	fHost, _ := cmd.Flags().GetString("fleet-server-host")
 	fPort, _ := cmd.Flags().GetUint16("fleet-server-port")
@@ -99,6 +101,10 @@ func buildEnrollmentFlags(cmd *cobra.Command, url string, token string) []string
 		args = append(args, "--fleet-server")
 		args = append(args, fServer)
 	}
+	if fElasticSearchCA != "" {
+		args = append(args, "--fleet-server-elasticsearch-ca")
+		args = append(args, fElasticSearchCA)
+	}
 	if fPolicy != "" {
 		args = append(args, "--fleet-server-policy")
 		args = append(args, fPolicy)
@@ -204,6 +210,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, args
 	}
 	enrollmentToken, _ := cmd.Flags().GetString("enrollment-token")
 	fServer, _ := cmd.Flags().GetString("fleet-server")
+	fElasticSearchCA, _ := cmd.Flags().GetString("fleet-server-elasticsearch-ca")
 	fPolicy, _ := cmd.Flags().GetString("fleet-server-policy")
 	fHost, _ := cmd.Flags().GetString("fleet-server-host")
 	fPort, _ := cmd.Flags().GetUint16("fleet-server-port")
@@ -228,14 +235,15 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, args
 		UserProvidedMetadata: make(map[string]interface{}),
 		Staging:              staging,
 		FleetServer: application.EnrollCmdFleetServerOption{
-			ConnStr:    fServer,
-			PolicyID:   fPolicy,
-			Host:       fHost,
-			Port:       fPort,
-			Cert:       fCert,
-			CertKey:    fCertKey,
-			Insecure:   fInsecure,
-			SpawnAgent: !fromInstall,
+			ConnStr:         fServer,
+			ElasticsearchCA: fElasticSearchCA,
+			PolicyID:        fPolicy,
+			Host:            fHost,
+			Port:            fPort,
+			Cert:            fCert,
+			CertKey:         fCertKey,
+			Insecure:        fInsecure,
+			SpawnAgent:      !fromInstall,
 		},
 	}
 

From 416bc0010663db89459359874aaf0c4c20b9fdc4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Thu, 11 Mar 2021 15:54:57 +0100
Subject: [PATCH 8/8] Update Golang to 1.15.9 (#24442)

---
 .go-version                             | 2 +-
 CHANGELOG-developer.next.asciidoc       | 1 +
 auditbeat/Dockerfile                    | 2 +-
 filebeat/Dockerfile                     | 2 +-
 heartbeat/Dockerfile                    | 2 +-
 journalbeat/Dockerfile                  | 2 +-
 libbeat/Dockerfile                      | 2 +-
 libbeat/docs/version.asciidoc           | 2 +-
 metricbeat/Dockerfile                   | 2 +-
 metricbeat/module/http/_meta/Dockerfile | 2 +-
 packetbeat/Dockerfile                   | 2 +-
 x-pack/functionbeat/Dockerfile          | 2 +-
 x-pack/libbeat/Dockerfile               | 2 +-
 13 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/.go-version b/.go-version
index 98e863cdf81..30a88ea45a4 100644
--- a/.go-version
+++ b/.go-version
@@ -1 +1 @@
-1.15.8
+1.15.9
diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc
index 2b319225a97..d34704ecaac 100644
--- a/CHANGELOG-developer.next.asciidoc
+++ b/CHANGELOG-developer.next.asciidoc
@@ -109,3 +109,4 @@ The list below covers the major changes between 7.0.0-rc2 and master only.
 - Add support for customized monitoring API. {pull}22605[22605]
 - Update Go version to 1.15.7. {pull}22495[22495]
 - Update Go version to 1.15.8. {pull}23955[23955]
+- Update Go version to 1.15.9. {pull}24442[24442]
diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile
index 98c459dc4f4..4435e1aa944 100644
--- a/auditbeat/Dockerfile
+++ b/auditbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/filebeat/Dockerfile b/filebeat/Dockerfile
index 3750939e8ce..27c59548eb2 100644
--- a/filebeat/Dockerfile
+++ b/filebeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile
index ed2c4d10918..0f6bdf7b3a3 100644
--- a/heartbeat/Dockerfile
+++ b/heartbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/journalbeat/Dockerfile b/journalbeat/Dockerfile
index 096b2ec1e79..da708d84a56 100644
--- a/journalbeat/Dockerfile
+++ b/journalbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/libbeat/Dockerfile b/libbeat/Dockerfile
index 6c99894603c..b762849968f 100644
--- a/libbeat/Dockerfile
+++ b/libbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc
index 40b2d1d836d..b9212db4e41 100644
--- a/libbeat/docs/version.asciidoc
+++ b/libbeat/docs/version.asciidoc
@@ -1,6 +1,6 @@
 :stack-version: 8.0.0
 :doc-branch: master
-:go-version: 1.15.8
+:go-version: 1.15.9
 :release-state: unreleased
 :python: 3.7
 :docker: 1.12
diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile
index 4d96de35265..61805ca85c0 100644
--- a/metricbeat/Dockerfile
+++ b/metricbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt update \
diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile
index 64d1de65fe0..07d81b3c5b9 100644
--- a/metricbeat/module/http/_meta/Dockerfile
+++ b/metricbeat/module/http/_meta/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 COPY test/main.go main.go
diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile
index f48ccaadd29..416f99f900d 100644
--- a/packetbeat/Dockerfile
+++ b/packetbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile
index 53950aa33b6..3aa5e4bf820 100644
--- a/x-pack/functionbeat/Dockerfile
+++ b/x-pack/functionbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
diff --git a/x-pack/libbeat/Dockerfile b/x-pack/libbeat/Dockerfile
index 935a38c8365..0c51c2ea88f 100644
--- a/x-pack/libbeat/Dockerfile
+++ b/x-pack/libbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.15.8
+FROM golang:1.15.9
 
 RUN \
     apt-get update \
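
Note on the CA plumbing added in PATCH 7/8: config.go and container.go use the same pattern throughout. An optional CA path is turned into a tlscommon.Config with a single entry in CAs, and the value stays nil when no CA is supplied, so the default TLS behavior applies whenever the corresponding flag or environment variable is unset. A minimal standalone sketch of that pattern follows (illustrative only, not part of the patches; the helper name tlsFromCA and the example path are made up):

package main

import (
	"fmt"

	"github.com/elastic/beats/v7/libbeat/common/transport/tlscommon"
)

// tlsFromCA mirrors the pattern used by createFleetServerBootstrapConfig and
// kibanaClient in the patch: an empty path means no custom CA, otherwise the
// given path becomes the only trusted certificate authority.
func tlsFromCA(ca string) *tlscommon.Config {
	if ca == "" {
		return nil
	}
	return &tlscommon.Config{
		CAs: []string{ca},
	}
}

func main() {
	// Placeholder for a value such as FLEET_SERVER_ELASTICSEARCH_CA or KIBANA_CA.
	fmt.Printf("%+v\n", tlsFromCA("/etc/ssl/certs/elasticsearch-ca.pem"))
}

Assigning the result to es.TLS or kibana.ClientConfig.TLS, as the patch does, only overrides TLS settings when a custom CA is actually configured.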