From a9a277999de749383699125b9c7e7f058f20886d Mon Sep 17 00:00:00 2001
From: Pier-Hugues Pellerin
Date: Fri, 6 Mar 2020 13:46:25 -0500
Subject: [PATCH] Merge agent to master (#16494)

This PR moves the development of the Elastic Agent into the master branch.
The Agent can be configured locally or through the ingest manager in Kibana,
and it takes care of starting or stopping the processes that the
configuration requires.

Co-authored-by: Pier-Hugues Pellerin
Co-authored-by: Michal Pristas
---
 .travis.yml | 10 +
 Makefile | 2 +-
 NOTICE.txt | 1114 ++++-
 filebeat/filebeat.spec | 1 +
 go.mod | 10 +-
 go.sum | 51 +-
 libbeat/cmd/instance/beat.go | 2 +-
 libbeat/management/management.go | 25 +-
 vendor/github.com/antlr/antlr4/LICENSE.txt | 52 +
 .../antlr/antlr4/runtime/Go/antlr/atn.go | 152 +
 .../antlr4/runtime/Go/antlr/atn_config.go | 295 ++
 .../antlr4/runtime/Go/antlr/atn_config_set.go | 387 ++
 .../Go/antlr/atn_deserialization_options.go | 25 +
 .../runtime/Go/antlr/atn_deserializer.go | 828 ++++
 .../antlr4/runtime/Go/antlr/atn_simulator.go | 50 +
 .../antlr4/runtime/Go/antlr/atn_state.go | 386 ++
 .../antlr/antlr4/runtime/Go/antlr/atn_type.go | 11 +
 .../antlr4/runtime/Go/antlr/char_stream.go | 12 +
 .../runtime/Go/antlr/common_token_factory.go | 56 +
 .../runtime/Go/antlr/common_token_stream.go | 447 ++
 .../antlr/antlr4/runtime/Go/antlr/dfa.go | 171 +
 .../antlr4/runtime/Go/antlr/dfa_serializer.go | 152 +
 .../antlr4/runtime/Go/antlr/dfa_state.go | 166 +
 .../Go/antlr/diagnostic_error_listener.go | 111 +
 .../antlr4/runtime/Go/antlr/error_listener.go | 108 +
 .../antlr4/runtime/Go/antlr/error_strategy.go | 758 +++
 .../antlr/antlr4/runtime/Go/antlr/errors.go | 241 +
 .../antlr4/runtime/Go/antlr/file_stream.go | 49 +
 .../antlr4/runtime/Go/antlr/input_stream.go | 113 +
 .../antlr4/runtime/Go/antlr/int_stream.go | 16 +
 .../antlr4/runtime/Go/antlr/interval_set.go | 296 ++
 .../antlr/antlr4/runtime/Go/antlr/lexer.go | 417 ++
 .../antlr4/runtime/Go/antlr/lexer_action.go | 431 ++
 .../runtime/Go/antlr/lexer_action_executor.go | 170 +
 .../runtime/Go/antlr/lexer_atn_simulator.go | 658 +++
 .../antlr4/runtime/Go/antlr/ll1_analyzer.go | 215 +
 .../antlr/antlr4/runtime/Go/antlr/parser.go | 718 +++
 .../runtime/Go/antlr/parser_atn_simulator.go | 1473 ++++++
 .../runtime/Go/antlr/parser_rule_context.go | 362 ++
 .../runtime/Go/antlr/prediction_context.go | 756 +++
 .../runtime/Go/antlr/prediction_mode.go | 553 +++
 .../antlr4/runtime/Go/antlr/recognizer.go | 217 +
 .../antlr4/runtime/Go/antlr/rule_context.go | 114 +
 .../runtime/Go/antlr/semantic_context.go | 455 ++
 .../antlr/antlr4/runtime/Go/antlr/token.go | 210 +
 .../antlr4/runtime/Go/antlr/token_source.go | 17 +
 .../antlr4/runtime/Go/antlr/token_stream.go | 20 +
 .../runtime/Go/antlr/tokenstream_rewriter.go | 649 +++
 .../antlr4/runtime/Go/antlr/trace_listener.go | 32 +
 .../antlr4/runtime/Go/antlr/transition.go | 421 ++
 .../antlr/antlr4/runtime/Go/antlr/tree.go | 251 +
 .../antlr/antlr4/runtime/Go/antlr/trees.go | 137 +
 .../antlr/antlr4/runtime/Go/antlr/utils.go | 417 +
 vendor/github.com/hashicorp/errwrap/LICENSE | 354 ++
 vendor/github.com/hashicorp/errwrap/README.md | 89 +
 .../github.com/hashicorp/errwrap/errwrap.go | 169 +
 .../hashicorp/go-multierror/.travis.yml | 12 +
 .../hashicorp/go-multierror/LICENSE | 353 ++
 .../hashicorp/go-multierror/Makefile | 31 +
 .../hashicorp/go-multierror/README.md | 97 +
 .../hashicorp/go-multierror/append.go | 41 +
 .../hashicorp/go-multierror/flatten.go | 26 +
 .../hashicorp/go-multierror/format.go | 27 +
 .../hashicorp/go-multierror/multierror.go | 51 +
 .../hashicorp/go-multierror/prefix.go | 37 +
 vendor/github.com/oklog/ulid/.gitignore | 29 +
 vendor/github.com/oklog/ulid/.travis.yml | 16 +
 vendor/github.com/oklog/ulid/AUTHORS.md | 2 +
 vendor/github.com/oklog/ulid/CHANGELOG.md | 33 +
 vendor/github.com/oklog/ulid/CONTRIBUTING.md | 17 +
 vendor/github.com/oklog/ulid/Gopkg.lock | 15 +
 vendor/github.com/oklog/ulid/Gopkg.toml | 26 +
 vendor/github.com/oklog/ulid/LICENSE | 201 +
 vendor/github.com/oklog/ulid/README.md | 150 +
 vendor/github.com/oklog/ulid/ulid.go | 614 +++
 vendor/github.com/stretchr/objx/.travis.yml | 13 +-
 vendor/github.com/stretchr/objx/Gopkg.lock | 30 -
 vendor/github.com/stretchr/objx/Gopkg.toml | 8 -
 vendor/github.com/stretchr/objx/README.md | 2 +-
 vendor/github.com/stretchr/objx/Taskfile.yml | 50 +-
 .../github.com/stretchr/objx/conversions.go | 62 +
 vendor/github.com/stretchr/objx/go.mod | 8 +
 vendor/github.com/stretchr/objx/go.sum | 8 +
 .../github.com/stretchr/objx/type_specific.go | 29 +
 vendor/github.com/urso/diag-ecs/LICENSE | 201 +
 vendor/github.com/urso/diag-ecs/ecs/ecs.go | 4086 +++++++++++++++++
 vendor/github.com/urso/diag/.gitignore | 15 +
 vendor/github.com/urso/diag/LICENSE | 201 +
 vendor/github.com/urso/diag/README.md | 38 +
 vendor/github.com/urso/diag/ctxfmt/args.go | 21 +
 vendor/github.com/urso/diag/ctxfmt/ctxfmt.go | 101 +
 vendor/github.com/urso/diag/ctxfmt/error.go | 17 +
 .../github.com/urso/diag/ctxfmt/interpret.go | 1146 +++++
 vendor/github.com/urso/diag/ctxfmt/parse.go | 279 ++
 vendor/github.com/urso/diag/ctxfmt/print.go | 90 +
 vendor/github.com/urso/diag/ctxfmt/util.go | 43 +
 .../github.com/urso/diag/ctxfmt/util_go11.go | 38 +
 .../github.com/urso/diag/ctxfmt/util_go12.go | 15 +
 vendor/github.com/urso/diag/diag.go | 433 ++
 vendor/github.com/urso/diag/doc.go | 40 +
 vendor/github.com/urso/diag/fld.go | 59 +
 vendor/github.com/urso/diag/go.mod | 5 +
 vendor/github.com/urso/diag/go.sum | 4 +
 vendor/github.com/urso/diag/gocontext.go | 63 +
 vendor/github.com/urso/diag/value.go | 165 +
 vendor/github.com/urso/ecslog/.gitignore | 12 +
 vendor/github.com/urso/ecslog/.travis.yml | 7 +
 vendor/github.com/urso/ecslog/LICENSE | 201 +
 vendor/github.com/urso/ecslog/README.md | 470 ++
 .../urso/ecslog/backend/appender/file.go | 84 +
 .../urso/ecslog/backend/appender/writer.go | 76 +
 .../github.com/urso/ecslog/backend/backend.go | 51 +
 .../github.com/urso/ecslog/backend/caller.go | 56 +
 .../urso/ecslog/backend/layout/dyn.go | 31 +
 .../urso/ecslog/backend/layout/layout.go | 20 +
 .../urso/ecslog/backend/layout/plain.go | 214 +
 .../urso/ecslog/backend/layout/structured.go | 385 ++
 vendor/github.com/urso/ecslog/go.mod | 12 +
 vendor/github.com/urso/ecslog/go.sum | 101 +
 vendor/github.com/urso/ecslog/log.go | 259 ++
 .../urso/magetools/gotool/gotool.go | 2 +
 .../github.com/urso/magetools/gotool/run.go | 35 +
 vendor/github.com/urso/sderr/LICENSE | 201 +
 vendor/github.com/urso/sderr/builder.go | 151 +
 vendor/github.com/urso/sderr/errors.go | 252 +
 vendor/github.com/urso/sderr/go.mod | 5 +
 vendor/github.com/urso/sderr/go.sum | 6 +
 vendor/github.com/urso/sderr/query.go | 365 ++
 vendor/github.com/urso/sderr/sderr.go | 50 +
 vendor/github.com/urso/sderr/stacktrace.go | 110 +
 vendor/gopkg.in/yaml.v2/scannerc.go | 47 +-
 vendor/gopkg.in/yaml.v2/yamlh.go | 1 +
 vendor/modules.txt | 26 +-
 x-pack/agent/.gitignore | 8 +
 x-pack/agent/Dockerfile | 16 +
 x-pack/agent/Makefile | 13 +
 x-pack/agent/_meta/agent.docker.yml | 87 +
 x-pack/agent/_meta/agent.fleet.yml | 45 +
 x-pack/agent/_meta/agent.yml | 88 +
 x-pack/agent/_meta/common.p1.yml | 5 +
 x-pack/agent/_meta/common.p2.yml | 113 +
 x-pack/agent/_meta/common.reference.p1.yml | 5 +
 x-pack/agent/_meta/common.reference.p2.yml | 113 +
 x-pack/agent/agent.docker.yml | 87 +
 x-pack/agent/agent.reference.yml | 118 +
 x-pack/agent/agent.yml | 118 +
 x-pack/agent/cmd/agent/agent.go | 25 +
 .../cmd/buildfleetcfg/buildfleetcfg.go | 110 +
 .../dev-tools/cmd/buildspec/buildspec.go | 120 +
 .../agent/dev-tools/cmd/fakewebapi/README.md | 13 +
 .../cmd/fakewebapi/action_example.json | 39 +
 .../dev-tools/cmd/fakewebapi/checkin.json | 4 +
 .../agent/dev-tools/cmd/fakewebapi/fetch.sh | 5 +
 x-pack/agent/dev-tools/cmd/fakewebapi/main.go | 166 +
 x-pack/agent/dev-tools/cmd/fakewebapi/push.sh | 3 +
 x-pack/agent/docker-compose.yml | 9 +
 x-pack/agent/docs/agent.asciidoc | 8 +
 .../docs/agent_configuration_example.yml | 565 +++
 x-pack/agent/magefile.go | 473 ++
 x-pack/agent/main.go | 24 +
 x-pack/agent/main_test.go | 29 +
 .../agent/application/action_dispatcher.go | 113 +
 .../application/action_dispatcher_test.go | 101 +
 .../pkg/agent/application/action_store.go | 157 +
 .../agent/application/action_store_test.go | 104 +
 .../pkg/agent/application/application.go | 71 +
 .../pkg/agent/application/application_test.go | 5 +
 x-pack/agent/pkg/agent/application/config.go | 160 +
 .../pkg/agent/application/config_request.go | 52 +
 .../agent/application/config_request_test.go | 27 +
 .../pkg/agent/application/config_test.go | 74 +
 .../agent/application/configuration_embed.go | 25 +
 x-pack/agent/pkg/agent/application/emitter.go | 64 +
 .../agent/pkg/agent/application/enroll_cmd.go | 191 +
 .../pkg/agent/application/enroll_cmd_test.go | 400 ++
 x-pack/agent/pkg/agent/application/error.go | 15 +
 .../pkg/agent/application/fleet_acker.go | 94 +
 .../pkg/agent/application/fleet_acker_test.go | 66 +
 .../pkg/agent/application/fleet_gateway.go | 217 +
 .../agent/application/fleet_gateway_test.go | 458 ++
 .../pkg/agent/application/global_config.go | 60 +
 .../handler_action_policy_change.go | 40 +
 .../handler_action_policy_change_test.go | 151 +
 .../pkg/agent/application/handler_default.go | 20 +
 .../pkg/agent/application/handler_unknown.go | 20 +
 .../pkg/agent/application/info/agent_id.go | 142 +
 .../pkg/agent/application/info/agent_info.go | 46 +
 .../agent/pkg/agent/application/lazy_acker.go | 54 +
 .../pkg/agent/application/lazy_acker_test.go | 124 +
 .../agent/pkg/agent/application/local_meta.go | 25 +
 .../agent/pkg/agent/application/local_mode.go | 146 +
 .../pkg/agent/application/managed_mode.go | 205 +
 .../agent/application/monitoring_decorator.go | 70 +
 .../application/monitoring_decorator_test.go | 183 +
 x-pack/agent/pkg/agent/application/once.go | 37 +
 .../agent/pkg/agent/application/periodic.go | 124 +
 x-pack/agent/pkg/agent/application/router.go | 114 +
 .../pkg/agent/application/router_test.go | 223 +
 x-pack/agent/pkg/agent/application/stream.go | 99 +
 x-pack/agent/pkg/agent/cmd/cmd_test.go | 32 +
 x-pack/agent/pkg/agent/cmd/common.go | 66 +
 x-pack/agent/pkg/agent/cmd/enroll.go | 113 +
 x-pack/agent/pkg/agent/cmd/run.go | 65 +
 .../agent/pkg/agent/configrequest/request.go | 18 +
 x-pack/agent/pkg/agent/configrequest/step.go | 31 +
 x-pack/agent/pkg/agent/errors/error.go | 103 +
 x-pack/agent/pkg/agent/errors/error_test.go | 194 +
 x-pack/agent/pkg/agent/errors/generators.go | 55 +
 x-pack/agent/pkg/agent/errors/types.go | 49 +
 .../agent/pkg/agent/internal/yamltest/yaml.go | 54 +
 .../pkg/agent/operation/config/config.go | 22 +
 .../pkg/agent/operation/event_processor.go | 26 +
 .../agent/pkg/agent/operation/monitoring.go | 273 ++
 .../pkg/agent/operation/monitoring_test.go | 151 +
 x-pack/agent/pkg/agent/operation/operation.go | 48 +
 .../pkg/agent/operation/operation_config.go | 67 +
 .../pkg/agent/operation/operation_fetch.go | 87 +
 .../pkg/agent/operation/operation_install.go | 71 +
 .../pkg/agent/operation/operation_remove.go | 48 +
 .../pkg/agent/operation/operation_start.go | 76 +
 .../pkg/agent/operation/operation_stop.go | 64 +
 .../pkg/agent/operation/operation_verify.go | 49 +
 x-pack/agent/pkg/agent/operation/operator.go | 275 ++
 .../pkg/agent/operation/operator_handlers.go | 95 +
 .../pkg/agent/operation/operator_test.go | 454 ++
 .../configurable-1.0-darwin-x86/README.md | 1 +
 .../configurable-1.0-darwin-x86/main.go | 63 +
 .../README.md | 2 +
 .../configurablebyfile-1.0-darwin-x86/main.go | 52 +
 .../scripts/long-1.0-darwin-x86/README.md | 1 +
 .../scripts/short-1.0-darwin-x86/README.md | 1 +
 x-pack/agent/pkg/agent/program/methods.go | 142 +
 x-pack/agent/pkg/agent/program/program.go | 261 ++
 .../agent/pkg/agent/program/program_test.go | 608 +++
 x-pack/agent/pkg/agent/program/spec.go | 88 +
 x-pack/agent/pkg/agent/program/spec_test.go | 115 +
 x-pack/agent/pkg/agent/program/supported.go | 33 +
 .../testdata/audit_config-auditbeat.yml | 22 +
 .../agent/program/testdata/audit_config.yml | 28 +
 .../agent/program/testdata/enabled_false.yml | 16 +
 .../program/testdata/enabled_output_false.yml | 16 +
 .../testdata/enabled_output_true-filebeat.yml | 14 +
 .../program/testdata/enabled_output_true.yml | 16 +
 .../testdata/enabled_true-filebeat.yml | 14 +
 .../agent/program/testdata/enabled_true.yml | 16 +
 .../testdata/journal_config-journalbeat.yml | 15 +
 .../agent/program/testdata/journal_config.yml | 21 +
 .../testdata/monitor_config-heartbeat.yml | 18 +
 .../agent/program/testdata/monitor_config.yml | 23 +
 .../testdata/single_config-filebeat.yml | 13 +
 .../testdata/single_config-metricbeat.yml | 11 +
 .../agent/program/testdata/single_config.yml | 19 +
 .../agent/pkg/agent/stateresolver/resolve.go | 179 +
 .../pkg/agent/stateresolver/resolve_test.go | 381 ++
 .../agent/stateresolver/statechange_string.go | 30 +
 .../pkg/agent/stateresolver/stateresolver.go | 63 +
 .../agent/stateresolver/stateresolver_test.go | 63 +
 x-pack/agent/pkg/agent/storage/storage.go | 271 ++
 .../agent/pkg/agent/storage/storage_test.go | 228 +
 x-pack/agent/pkg/agent/transpiler/ast.go | 769 ++++
 x-pack/agent/pkg/agent/transpiler/ast_test.go | 1105 +++++
 .../agent/pkg/agent/transpiler/map_visitor.go | 93 +
 x-pack/agent/pkg/agent/transpiler/rules.go | 684 +++
 .../agent/pkg/agent/transpiler/rules_test.go | 501 ++
 x-pack/agent/pkg/agent/transpiler/visitor.go | 31 +
 x-pack/agent/pkg/artifact/artifact.go | 43 +
 x-pack/agent/pkg/artifact/config.go | 76 +
 .../artifact/download/composed/downloader.go | 48 +
 .../download/composed/downloader_test.go | 78 +
 .../artifact/download/composed/verifier.go | 45 +
 .../agent/pkg/artifact/download/downloader.go | 12 +
 .../pkg/artifact/download/fs/downloader.go | 96 +
 .../pkg/artifact/download/fs/verifier.go | 105 +
 .../pkg/artifact/download/http/downloader.go | 120 +
 .../artifact/download/http/elastic_test.go | 182 +
 .../pkg/artifact/download/http/headers_rtt.go | 26 +
 .../download/http/headers_rtt_test.go | 38 +
 .../pkg/artifact/download/http/verifier.go | 156 +
 .../download/localremote/downloader.go | 19 +
 .../artifact/download/localremote/verifier.go | 28 +
 .../agent/pkg/artifact/download/verifier.go | 10 +
 .../agent/pkg/artifact/install/installer.go | 43 +
 .../pkg/artifact/install/tar/tar_installer.go | 111 +
 .../pkg/artifact/install/zip/zip_installer.go | 71 +
 x-pack/agent/pkg/basecmd/cmd.go | 19 +
 x-pack/agent/pkg/basecmd/cmd_test.go | 16 +
 x-pack/agent/pkg/basecmd/version/cmd.go | 31 +
 x-pack/agent/pkg/basecmd/version/cmd_test.go | 26 +
 x-pack/agent/pkg/boolexp/Boolexp.g4 | 55 +
 x-pack/agent/pkg/boolexp/boolexp.go | 17 +
 x-pack/agent/pkg/boolexp/boolexp_test.go | 272 ++
 x-pack/agent/pkg/boolexp/compare.go | 283 ++
 x-pack/agent/pkg/boolexp/expression.go | 76 +
 x-pack/agent/pkg/boolexp/methods.go | 58 +
 .../agent/pkg/boolexp/parser/Boolexp.interp | 55 +
 .../agent/pkg/boolexp/parser/Boolexp.tokens | 29 +
 .../pkg/boolexp/parser/BoolexpLexer.interp | 79 +
 .../pkg/boolexp/parser/BoolexpLexer.tokens | 29 +
 .../boolexp/parser/boolexp_base_listener.go | 140 +
 .../boolexp/parser/boolexp_base_visitor.go | 89 +
 .../agent/pkg/boolexp/parser/boolexp_lexer.go | 191 +
 .../pkg/boolexp/parser/boolexp_listener.go | 128 +
 .../pkg/boolexp/parser/boolexp_parser.go | 1952 ++++++++
 .../pkg/boolexp/parser/boolexp_visitor.go | 71 +
 x-pack/agent/pkg/boolexp/visitor.go | 241 +
 x-pack/agent/pkg/cli/flags.go | 24 +
 x-pack/agent/pkg/cli/flags_test.go | 17 +
 x-pack/agent/pkg/cli/streams.go | 39 +
 x-pack/agent/pkg/config/config.go | 122 +
 x-pack/agent/pkg/config/config_test.go | 85 +
 x-pack/agent/pkg/core/logger/logger.go | 36 +
 x-pack/agent/pkg/core/logger/logger_test.go | 11 +
 x-pack/agent/pkg/core/plugin/app/app.go | 190 +
 x-pack/agent/pkg/core/plugin/app/client.go | 10 +
 x-pack/agent/pkg/core/plugin/app/configure.go | 83 +
 .../agent/pkg/core/plugin/app/descriptor.go | 149 +
 .../pkg/core/plugin/app/execution_context.go | 34 +
 .../app/monitoring/beats/beats_monitor.go | 199 +
 .../plugin/app/monitoring/beats/drop_test.go | 50 +
 .../plugin/app/monitoring/beats/monitoring.go | 42 +
 .../pkg/core/plugin/app/monitoring/config.go | 11 +
 .../pkg/core/plugin/app/monitoring/monitor.go | 32 +
 .../app/monitoring/noop/noop_monitor.go | 48 +
 .../agent/pkg/core/plugin/app/process_cred.go | 63 +
 .../pkg/core/plugin/app/process_cred_other.go | 12 +
 x-pack/agent/pkg/core/plugin/app/spec.go | 43 +
 x-pack/agent/pkg/core/plugin/app/start.go | 358 ++
 x-pack/agent/pkg/core/plugin/app/tag.go | 12 +
 .../agent/pkg/core/plugin/app/watch_posix.go | 33 +
 .../pkg/core/plugin/app/watch_windows.go | 55 +
 x-pack/agent/pkg/core/plugin/authority/ca.go | 146 +
 x-pack/agent/pkg/core/plugin/process/cmd.go | 23 +
 .../agent/pkg/core/plugin/process/cmd_cred.go | 36 +
 .../agent/pkg/core/plugin/process/config.go | 21 +
 .../agent/pkg/core/plugin/process/process.go | 204 +
 x-pack/agent/pkg/core/plugin/retry/config.go | 30 +
 x-pack/agent/pkg/core/plugin/retry/error.go | 30 +
 .../pkg/core/plugin/retry/retrystrategy.go | 110 +
 .../core/plugin/retry/retrystrategy_test.go | 182 +
 x-pack/agent/pkg/core/plugin/server/server.go | 93 +
 x-pack/agent/pkg/core/plugin/state/state.go | 25 +
 x-pack/agent/pkg/core/remoteconfig/config.go | 31 +
 .../core/remoteconfig/grpc/configclient.go | 93 +
 .../remoteconfig/grpc/connection_provider.go | 55 +
 .../pkg/core/remoteconfig/grpc/factory.go | 79 +
 .../core/remoteconfig/grpc/noop_backoff.go | 26 +
 .../remoteconfig/grpc/remote_config.pb.go | 310 ++
 x-pack/agent/pkg/crypto/io.go | 378 ++
 x-pack/agent/pkg/crypto/io_test.go | 195 +
 x-pack/agent/pkg/dir/discover.go | 32 +
 x-pack/agent/pkg/dir/discover_test.go | 71 +
 x-pack/agent/pkg/filewatcher/watcher.go | 260 ++
 x-pack/agent/pkg/filewatcher/watcher_test.go | 228 +
 x-pack/agent/pkg/fleetapi/ack_cmd.go | 106 +
 x-pack/agent/pkg/fleetapi/ack_cmd_test.go | 76 +
 x-pack/agent/pkg/fleetapi/action.go | 132 +
 x-pack/agent/pkg/fleetapi/checkin_cmd.go | 117 +
 x-pack/agent/pkg/fleetapi/checkin_cmd_test.go | 293 ++
 x-pack/agent/pkg/fleetapi/client.go | 121 +
 x-pack/agent/pkg/fleetapi/client_test.go | 179 +
 x-pack/agent/pkg/fleetapi/custom_type.go | 20 +
 x-pack/agent/pkg/fleetapi/custom_type_test.go | 23 +
 x-pack/agent/pkg/fleetapi/enroll_cmd.go | 212 +
 x-pack/agent/pkg/fleetapi/enroll_cmd_test.go | 134 +
 x-pack/agent/pkg/fleetapi/helper_test.go | 64 +
 x-pack/agent/pkg/fleetapi/round_trippers.go | 72 +
 x-pack/agent/pkg/id/generate.go | 34 +
 x-pack/agent/pkg/id/generate_test.go | 17 +
 x-pack/agent/pkg/kibana/client.go | 220 +
 x-pack/agent/pkg/kibana/client_test.go | 326 ++
 x-pack/agent/pkg/kibana/config.go | 53 +
 x-pack/agent/pkg/kibana/round_trippers.go | 188 +
 x-pack/agent/pkg/packer/packer.go | 106 +
 x-pack/agent/pkg/packer/packer_test.go | 128 +
 x-pack/agent/pkg/release/release_dev.go | 16 +
 x-pack/agent/pkg/release/version.go | 41 +
 x-pack/agent/pkg/release/version_test.go | 42 +
 x-pack/agent/pkg/reporter/backend.go | 13 +
 x-pack/agent/pkg/reporter/event.go | 30 +
 x-pack/agent/pkg/reporter/fleet/config.go | 19 +
 x-pack/agent/pkg/reporter/fleet/reporter.go | 181 +
 .../agent/pkg/reporter/fleet/reporter_test.go | 241 +
 x-pack/agent/pkg/reporter/log/config.go | 17 +
 x-pack/agent/pkg/reporter/log/format.go | 59 +
 x-pack/agent/pkg/reporter/log/reporter.go | 91 +
 .../agent/pkg/reporter/log/reporter_test.go | 108 +
 x-pack/agent/pkg/reporter/noop/reporter.go | 28 +
 x-pack/agent/pkg/reporter/reporter.go | 130 +
 x-pack/agent/pkg/reporter/reporter_test.go | 90 +
 x-pack/agent/pkg/scheduler/scheduler.go | 134 +
 x-pack/agent/pkg/scheduler/scheduler_test.go | 193 +
 x-pack/agent/pkg/sorted/set.go | 60 +
 x-pack/agent/pkg/sorted/set_test.go | 161 +
 x-pack/agent/pkg/tokenbucket/token_bucket.go | 82 +
 .../pkg/tokenbucket/token_bucket_test.go | 104 +
 x-pack/agent/proto/remote_config.proto | 22 +
 x-pack/agent/spec/auditbeat.yml.disabled | 34 +
 x-pack/agent/spec/filebeat.yml | 42 +
 x-pack/agent/spec/heartbeat.yml.disabled | 27 +
 x-pack/agent/spec/journalbeat.yml.disabled | 22 +
 x-pack/agent/spec/metricbeat.yml | 32 +
 x-pack/filebeat/magefile.go | 8 +-
 x-pack/libbeat/cmd/inject.go | 2 +
 x-pack/libbeat/management/config.go | 17 +-
 x-pack/libbeat/management/error.go | 3 +-
 x-pack/libbeat/management/error_test.go | 6 +-
 x-pack/libbeat/management/fleet/config.go | 27 +
 .../libbeat/management/fleet/config_server.go | 61 +
 x-pack/libbeat/management/fleet/manager.go | 279 ++
 .../libbeat/management/fleet/manager_test.go | 60 +
 x-pack/libbeat/management/fleet/plugin.go | 32 +
 x-pack/libbeat/management/manager.go | 15 +-
 x-pack/libbeat/management/manager_test.go | 4 +
 x-pack/libbeat/management/plugin.go | 31 +
 x-pack/metricbeat/magefile.go | 8 +-
 415 files changed, 57474 insertions(+), 278 deletions(-)
 create mode 100644 filebeat/filebeat.spec
 create mode 100644 vendor/github.com/antlr/antlr4/LICENSE.txt
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
 create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
 create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
 create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
 create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
 create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
 create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
 create mode 100644 vendor/github.com/oklog/ulid/.gitignore
 create mode 100644 vendor/github.com/oklog/ulid/.travis.yml
 create mode 100644 vendor/github.com/oklog/ulid/AUTHORS.md
 create mode 100644 vendor/github.com/oklog/ulid/CHANGELOG.md
 create mode 100644 vendor/github.com/oklog/ulid/CONTRIBUTING.md
 create mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock
 create mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml
 create mode 100644 vendor/github.com/oklog/ulid/LICENSE
 create mode 100644 vendor/github.com/oklog/ulid/README.md
 create mode 100644 vendor/github.com/oklog/ulid/ulid.go
 delete mode 100644 vendor/github.com/stretchr/objx/Gopkg.lock
 delete mode 100644 vendor/github.com/stretchr/objx/Gopkg.toml
 create mode 100644 vendor/github.com/stretchr/objx/go.mod
 create mode 100644 vendor/github.com/stretchr/objx/go.sum
 create mode 100644 vendor/github.com/urso/diag-ecs/LICENSE
 create mode 100644 vendor/github.com/urso/diag-ecs/ecs/ecs.go
 create mode 100644 vendor/github.com/urso/diag/.gitignore
 create mode 100644 vendor/github.com/urso/diag/LICENSE
 create mode 100644 vendor/github.com/urso/diag/README.md
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/args.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/ctxfmt.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/error.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/interpret.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/parse.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/print.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/util.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/util_go11.go
 create mode 100644 vendor/github.com/urso/diag/ctxfmt/util_go12.go
 create mode 100644 vendor/github.com/urso/diag/diag.go
 create mode 100644 vendor/github.com/urso/diag/doc.go
 create mode 100644 vendor/github.com/urso/diag/fld.go
 create mode 100644 vendor/github.com/urso/diag/go.mod
 create mode 100644 vendor/github.com/urso/diag/go.sum
 create mode 100644 vendor/github.com/urso/diag/gocontext.go
 create mode 100644 vendor/github.com/urso/diag/value.go
 create mode 100644 vendor/github.com/urso/ecslog/.gitignore
 create mode 100644 vendor/github.com/urso/ecslog/.travis.yml
 create mode 100644 vendor/github.com/urso/ecslog/LICENSE
 create mode 100644 vendor/github.com/urso/ecslog/README.md
 create mode 100644 vendor/github.com/urso/ecslog/backend/appender/file.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/appender/writer.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/backend.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/caller.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/layout/dyn.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/layout/layout.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/layout/plain.go
 create mode 100644 vendor/github.com/urso/ecslog/backend/layout/structured.go
 create mode 100644 vendor/github.com/urso/ecslog/go.mod
 create mode 100644 vendor/github.com/urso/ecslog/go.sum
 create mode 100644 vendor/github.com/urso/ecslog/log.go
 create mode 100644 vendor/github.com/urso/magetools/gotool/run.go
 create mode 100644 vendor/github.com/urso/sderr/LICENSE
 create mode 100644 vendor/github.com/urso/sderr/builder.go
 create mode 100644 vendor/github.com/urso/sderr/errors.go
 create mode 100644 vendor/github.com/urso/sderr/go.mod
 create mode 100644 vendor/github.com/urso/sderr/go.sum
 create mode 100644 vendor/github.com/urso/sderr/query.go
 create mode 100644 vendor/github.com/urso/sderr/sderr.go
 create mode 100644 vendor/github.com/urso/sderr/stacktrace.go
 create mode 100644 x-pack/agent/.gitignore
 create mode 100644 x-pack/agent/Dockerfile
 create mode 100644 x-pack/agent/Makefile
 create mode 100644 x-pack/agent/_meta/agent.docker.yml
 create mode 100644 x-pack/agent/_meta/agent.fleet.yml
 create mode 100644 x-pack/agent/_meta/agent.yml
 create mode 100644 x-pack/agent/_meta/common.p1.yml
 create mode 100644 x-pack/agent/_meta/common.p2.yml
 create mode 100644 x-pack/agent/_meta/common.reference.p1.yml
 create mode 100644 x-pack/agent/_meta/common.reference.p2.yml
 create mode 100644 x-pack/agent/agent.docker.yml
 create mode 100644 x-pack/agent/agent.reference.yml
 create mode 100644 x-pack/agent/agent.yml
 create mode 100644 x-pack/agent/cmd/agent/agent.go
 create mode 100644 x-pack/agent/dev-tools/cmd/buildfleetcfg/buildfleetcfg.go
 create mode 100644 x-pack/agent/dev-tools/cmd/buildspec/buildspec.go
 create mode 100644 x-pack/agent/dev-tools/cmd/fakewebapi/README.md
 create mode 100644 x-pack/agent/dev-tools/cmd/fakewebapi/action_example.json
 create mode 100644 x-pack/agent/dev-tools/cmd/fakewebapi/checkin.json
 create mode 100755 x-pack/agent/dev-tools/cmd/fakewebapi/fetch.sh
 create mode 100644 x-pack/agent/dev-tools/cmd/fakewebapi/main.go
 create mode 100755 x-pack/agent/dev-tools/cmd/fakewebapi/push.sh
 create mode 100644 x-pack/agent/docker-compose.yml
 create mode 100644 x-pack/agent/docs/agent.asciidoc
 create mode 100644 x-pack/agent/docs/agent_configuration_example.yml
 create mode 100644 x-pack/agent/magefile.go
 create mode 100644 x-pack/agent/main.go
 create mode 100644 x-pack/agent/main_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/action_dispatcher.go
 create mode 100644 x-pack/agent/pkg/agent/application/action_dispatcher_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/action_store.go
 create mode 100644 x-pack/agent/pkg/agent/application/action_store_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/application.go
 create mode 100644 x-pack/agent/pkg/agent/application/application_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/config.go
 create mode 100644 x-pack/agent/pkg/agent/application/config_request.go
 create mode 100644 x-pack/agent/pkg/agent/application/config_request_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/config_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/configuration_embed.go
 create mode 100644 x-pack/agent/pkg/agent/application/emitter.go
 create mode 100644 x-pack/agent/pkg/agent/application/enroll_cmd.go
 create mode 100644 x-pack/agent/pkg/agent/application/enroll_cmd_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/error.go
 create mode 100644 x-pack/agent/pkg/agent/application/fleet_acker.go
 create mode 100644 x-pack/agent/pkg/agent/application/fleet_acker_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/fleet_gateway.go
 create mode 100644 x-pack/agent/pkg/agent/application/fleet_gateway_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/global_config.go
 create mode 100644 x-pack/agent/pkg/agent/application/handler_action_policy_change.go
 create mode 100644 x-pack/agent/pkg/agent/application/handler_action_policy_change_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/handler_default.go
 create mode 100644 x-pack/agent/pkg/agent/application/handler_unknown.go
 create mode 100644 x-pack/agent/pkg/agent/application/info/agent_id.go
 create mode 100644 x-pack/agent/pkg/agent/application/info/agent_info.go
 create mode 100644 x-pack/agent/pkg/agent/application/lazy_acker.go
 create mode 100644 x-pack/agent/pkg/agent/application/lazy_acker_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/local_meta.go
 create mode 100644 x-pack/agent/pkg/agent/application/local_mode.go
 create mode 100644 x-pack/agent/pkg/agent/application/managed_mode.go
 create mode 100644 x-pack/agent/pkg/agent/application/monitoring_decorator.go
 create mode 100644 x-pack/agent/pkg/agent/application/monitoring_decorator_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/once.go
 create mode 100644 x-pack/agent/pkg/agent/application/periodic.go
 create mode 100644 x-pack/agent/pkg/agent/application/router.go
 create mode 100644 x-pack/agent/pkg/agent/application/router_test.go
 create mode 100644 x-pack/agent/pkg/agent/application/stream.go
 create mode 100644 x-pack/agent/pkg/agent/cmd/cmd_test.go
 create mode 100644 x-pack/agent/pkg/agent/cmd/common.go
 create mode 100644 x-pack/agent/pkg/agent/cmd/enroll.go
 create mode 100644 x-pack/agent/pkg/agent/cmd/run.go
 create mode 100644 x-pack/agent/pkg/agent/configrequest/request.go
 create mode 100644 x-pack/agent/pkg/agent/configrequest/step.go
 create mode 100644 x-pack/agent/pkg/agent/errors/error.go
 create mode 100644 x-pack/agent/pkg/agent/errors/error_test.go
 create mode 100644 x-pack/agent/pkg/agent/errors/generators.go
 create mode 100644 x-pack/agent/pkg/agent/errors/types.go
 create mode 100644 x-pack/agent/pkg/agent/internal/yamltest/yaml.go
 create mode 100644 x-pack/agent/pkg/agent/operation/config/config.go
 create mode 100644 x-pack/agent/pkg/agent/operation/event_processor.go
 create mode 100644 x-pack/agent/pkg/agent/operation/monitoring.go
 create mode 100644 x-pack/agent/pkg/agent/operation/monitoring_test.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_config.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_fetch.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_install.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_remove.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_start.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_stop.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operation_verify.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operator.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operator_handlers.go
 create mode 100644 x-pack/agent/pkg/agent/operation/operator_test.go
 create mode 100644 x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/README.md
 create mode 100644 x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/main.go
 create mode 100644 x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/README.md
 create mode 100644 x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/main.go
 create mode 100644 x-pack/agent/pkg/agent/operation/tests/scripts/long-1.0-darwin-x86/README.md
 create mode 100644 x-pack/agent/pkg/agent/operation/tests/scripts/short-1.0-darwin-x86/README.md
 create mode 100644 x-pack/agent/pkg/agent/program/methods.go
 create mode 100644 x-pack/agent/pkg/agent/program/program.go
 create mode 100644 x-pack/agent/pkg/agent/program/program_test.go
 create mode 100644 x-pack/agent/pkg/agent/program/spec.go
 create mode 100644 x-pack/agent/pkg/agent/program/spec_test.go
 create mode 100644 x-pack/agent/pkg/agent/program/supported.go
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/audit_config-auditbeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/audit_config.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/enabled_false.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/enabled_output_false.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/enabled_output_true.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/enabled_true-filebeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/enabled_true.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/journal_config-journalbeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/journal_config.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/monitor_config-heartbeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/monitor_config.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/single_config-filebeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/single_config-metricbeat.yml
 create mode 100644 x-pack/agent/pkg/agent/program/testdata/single_config.yml
 create mode 100644 x-pack/agent/pkg/agent/stateresolver/resolve.go
 create mode 100644 x-pack/agent/pkg/agent/stateresolver/resolve_test.go
 create mode 100644 x-pack/agent/pkg/agent/stateresolver/statechange_string.go
 create mode 100644 x-pack/agent/pkg/agent/stateresolver/stateresolver.go
 create mode 100644 x-pack/agent/pkg/agent/stateresolver/stateresolver_test.go
 create mode 100644 x-pack/agent/pkg/agent/storage/storage.go
 create mode 100644 x-pack/agent/pkg/agent/storage/storage_test.go
 create mode 100644 x-pack/agent/pkg/agent/transpiler/ast.go
 create mode 100644 x-pack/agent/pkg/agent/transpiler/ast_test.go
 create mode 100644 x-pack/agent/pkg/agent/transpiler/map_visitor.go
 create mode 100644 x-pack/agent/pkg/agent/transpiler/rules.go
 create mode 100644 x-pack/agent/pkg/agent/transpiler/rules_test.go
 create mode 100644 x-pack/agent/pkg/agent/transpiler/visitor.go
 create mode 100644 x-pack/agent/pkg/artifact/artifact.go
 create mode 100644 x-pack/agent/pkg/artifact/config.go
 create mode 100644 x-pack/agent/pkg/artifact/download/composed/downloader.go
 create mode 100644 x-pack/agent/pkg/artifact/download/composed/downloader_test.go
 create mode 100644 x-pack/agent/pkg/artifact/download/composed/verifier.go
 create mode 100644 x-pack/agent/pkg/artifact/download/downloader.go
 create mode 100644 x-pack/agent/pkg/artifact/download/fs/downloader.go
 create mode 100644 x-pack/agent/pkg/artifact/download/fs/verifier.go
 create mode 100644 x-pack/agent/pkg/artifact/download/http/downloader.go
 create mode 100644 x-pack/agent/pkg/artifact/download/http/elastic_test.go
 create mode 100644 x-pack/agent/pkg/artifact/download/http/headers_rtt.go
 create mode 100644 x-pack/agent/pkg/artifact/download/http/headers_rtt_test.go
 create mode 100644 x-pack/agent/pkg/artifact/download/http/verifier.go
 create mode 100644 x-pack/agent/pkg/artifact/download/localremote/downloader.go
 create mode 100644 x-pack/agent/pkg/artifact/download/localremote/verifier.go
 create mode 100644 x-pack/agent/pkg/artifact/download/verifier.go
 create mode 100644 x-pack/agent/pkg/artifact/install/installer.go
 create mode 100644 x-pack/agent/pkg/artifact/install/tar/tar_installer.go
 create mode 100644 x-pack/agent/pkg/artifact/install/zip/zip_installer.go
 create mode 100644 x-pack/agent/pkg/basecmd/cmd.go
 create mode 100644 x-pack/agent/pkg/basecmd/cmd_test.go
 create mode 100644 x-pack/agent/pkg/basecmd/version/cmd.go
 create mode 100644 x-pack/agent/pkg/basecmd/version/cmd_test.go
 create mode 100644 x-pack/agent/pkg/boolexp/Boolexp.g4
 create mode 100644 x-pack/agent/pkg/boolexp/boolexp.go
 create mode 100644 x-pack/agent/pkg/boolexp/boolexp_test.go
 create mode 100644 x-pack/agent/pkg/boolexp/compare.go
 create mode 100644 x-pack/agent/pkg/boolexp/expression.go
 create mode 100644 x-pack/agent/pkg/boolexp/methods.go
 create mode 100644 x-pack/agent/pkg/boolexp/parser/Boolexp.interp
 create mode 100644 x-pack/agent/pkg/boolexp/parser/Boolexp.tokens
 create mode 100644 x-pack/agent/pkg/boolexp/parser/BoolexpLexer.interp
 create mode 100644 x-pack/agent/pkg/boolexp/parser/BoolexpLexer.tokens
 create mode 100644 x-pack/agent/pkg/boolexp/parser/boolexp_base_listener.go
 create mode 100644 x-pack/agent/pkg/boolexp/parser/boolexp_base_visitor.go
 create mode 100644 x-pack/agent/pkg/boolexp/parser/boolexp_lexer.go
 create mode 100644 x-pack/agent/pkg/boolexp/parser/boolexp_listener.go
 create mode 100644 x-pack/agent/pkg/boolexp/parser/boolexp_parser.go
 create mode 100644 x-pack/agent/pkg/boolexp/parser/boolexp_visitor.go
 create mode 100644 x-pack/agent/pkg/boolexp/visitor.go
 create mode 100644 x-pack/agent/pkg/cli/flags.go
 create mode 100644 x-pack/agent/pkg/cli/flags_test.go
 create mode 100644 x-pack/agent/pkg/cli/streams.go
 create mode 100644 x-pack/agent/pkg/config/config.go
 create mode 100644 x-pack/agent/pkg/config/config_test.go
 create mode 100644 x-pack/agent/pkg/core/logger/logger.go
 create mode 100644 x-pack/agent/pkg/core/logger/logger_test.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/app.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/client.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/configure.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/descriptor.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/execution_context.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/monitoring/beats/drop_test.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/monitoring/beats/monitoring.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/monitoring/config.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/monitoring/monitor.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/process_cred.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/process_cred_other.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/spec.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/start.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/tag.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/watch_posix.go
 create mode 100644 x-pack/agent/pkg/core/plugin/app/watch_windows.go
 create mode 100644 x-pack/agent/pkg/core/plugin/authority/ca.go
 create mode 100644 x-pack/agent/pkg/core/plugin/process/cmd.go
 create mode 100644 x-pack/agent/pkg/core/plugin/process/cmd_cred.go
 create mode 100644 x-pack/agent/pkg/core/plugin/process/config.go
 create mode 100644 x-pack/agent/pkg/core/plugin/process/process.go
 create mode 100644 x-pack/agent/pkg/core/plugin/retry/config.go
 create mode 100644 x-pack/agent/pkg/core/plugin/retry/error.go
 create mode 100644 x-pack/agent/pkg/core/plugin/retry/retrystrategy.go
 create mode 100644 x-pack/agent/pkg/core/plugin/retry/retrystrategy_test.go
 create mode 100644 x-pack/agent/pkg/core/plugin/server/server.go
 create mode 100644 x-pack/agent/pkg/core/plugin/state/state.go
 create mode 100644 x-pack/agent/pkg/core/remoteconfig/config.go
 create mode 100644 x-pack/agent/pkg/core/remoteconfig/grpc/configclient.go
 create mode 100644 x-pack/agent/pkg/core/remoteconfig/grpc/connection_provider.go
 create mode 100644 x-pack/agent/pkg/core/remoteconfig/grpc/factory.go
 create mode 100644 x-pack/agent/pkg/core/remoteconfig/grpc/noop_backoff.go
 create mode 100644 x-pack/agent/pkg/core/remoteconfig/grpc/remote_config.pb.go
 create mode 100644 x-pack/agent/pkg/crypto/io.go
 create mode 100644 x-pack/agent/pkg/crypto/io_test.go
 create mode 100644 x-pack/agent/pkg/dir/discover.go
 create mode 100644 x-pack/agent/pkg/dir/discover_test.go
 create mode 100644 x-pack/agent/pkg/filewatcher/watcher.go
 create mode 100644 x-pack/agent/pkg/filewatcher/watcher_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/ack_cmd.go
 create mode 100644 x-pack/agent/pkg/fleetapi/ack_cmd_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/action.go
 create mode 100644 x-pack/agent/pkg/fleetapi/checkin_cmd.go
 create mode 100644 x-pack/agent/pkg/fleetapi/checkin_cmd_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/client.go
 create mode 100644 x-pack/agent/pkg/fleetapi/client_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/custom_type.go
 create mode 100644 x-pack/agent/pkg/fleetapi/custom_type_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/enroll_cmd.go
 create mode 100644 x-pack/agent/pkg/fleetapi/enroll_cmd_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/helper_test.go
 create mode 100644 x-pack/agent/pkg/fleetapi/round_trippers.go
 create mode 100644 x-pack/agent/pkg/id/generate.go
 create mode 100644 x-pack/agent/pkg/id/generate_test.go
 create mode 100644 x-pack/agent/pkg/kibana/client.go
 create mode 100644 x-pack/agent/pkg/kibana/client_test.go
 create mode 100644 x-pack/agent/pkg/kibana/config.go
 create mode 100644 x-pack/agent/pkg/kibana/round_trippers.go
 create mode 100644 x-pack/agent/pkg/packer/packer.go
 create mode 100644 x-pack/agent/pkg/packer/packer_test.go
 create mode 100644 x-pack/agent/pkg/release/release_dev.go
 create mode 100644 x-pack/agent/pkg/release/version.go
 create mode 100644 x-pack/agent/pkg/release/version_test.go
 create mode 100644 x-pack/agent/pkg/reporter/backend.go
 create mode 100644 x-pack/agent/pkg/reporter/event.go
 create mode 100644 x-pack/agent/pkg/reporter/fleet/config.go
 create mode 100644 x-pack/agent/pkg/reporter/fleet/reporter.go
 create mode 100644 x-pack/agent/pkg/reporter/fleet/reporter_test.go
 create mode 100644 x-pack/agent/pkg/reporter/log/config.go
 create mode 100644 x-pack/agent/pkg/reporter/log/format.go
 create mode 100644 x-pack/agent/pkg/reporter/log/reporter.go
 create mode 100644 x-pack/agent/pkg/reporter/log/reporter_test.go
 create mode 100644 x-pack/agent/pkg/reporter/noop/reporter.go
 create mode 100644 x-pack/agent/pkg/reporter/reporter.go
 create mode 100644 x-pack/agent/pkg/reporter/reporter_test.go
 create mode 100644 x-pack/agent/pkg/scheduler/scheduler.go
 create mode 100644 x-pack/agent/pkg/scheduler/scheduler_test.go
 create mode 100644 x-pack/agent/pkg/sorted/set.go
 create mode 100644 x-pack/agent/pkg/sorted/set_test.go
 create mode 100644 x-pack/agent/pkg/tokenbucket/token_bucket.go
 create mode 100644 x-pack/agent/pkg/tokenbucket/token_bucket_test.go
 create mode 100644 x-pack/agent/proto/remote_config.proto
 create mode 100644 x-pack/agent/spec/auditbeat.yml.disabled
 create mode 100644 x-pack/agent/spec/filebeat.yml
 create mode 100644 x-pack/agent/spec/heartbeat.yml.disabled
 create mode 100644 x-pack/agent/spec/journalbeat.yml.disabled
 create mode 100644 x-pack/agent/spec/metricbeat.yml
 create mode 100644 x-pack/libbeat/management/fleet/config.go
 create mode 100644 x-pack/libbeat/management/fleet/config_server.go
 create mode 100644 x-pack/libbeat/management/fleet/manager.go
 create mode 100644 x-pack/libbeat/management/fleet/manager_test.go
 create mode 100644 x-pack/libbeat/management/fleet/plugin.go
 create mode 100644 x-pack/libbeat/management/plugin.go

diff --git a/.travis.yml b/.travis.yml
index 1c224f5eaa8..44b547e49c6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -153,6 +153,16 @@ jobs:
       go: $TRAVIS_GO_VERSION
       stage: test
 
+    # Agent
+    - os: linux
+      env: TARGETS="-C x-pack/agent testsuite"
+      go: $TRAVIS_GO_VERSION
+      stage: test
+    - os: osx
+      env: TARGETS="TEST_ENVIRONMENT=0 -C x-pack/agent testsuite"
+      go: $TRAVIS_GO_VERSION
+      stage: test
+
     # Generators
     - os: linux
       env: TARGETS="-C generator/_templates/metricbeat test test-package"
diff --git a/Makefile b/Makefile
index f7262cc0701..2d08ff6c959 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 BUILD_DIR=$(CURDIR)/build
 COVERAGE_DIR=$(BUILD_DIR)/coverage
-BEATS?=auditbeat filebeat heartbeat journalbeat metricbeat packetbeat winlogbeat x-pack/functionbeat
+BEATS?=auditbeat filebeat heartbeat journalbeat metricbeat packetbeat winlogbeat x-pack/functionbeat x-pack/agent
 PROJECTS=libbeat $(BEATS)
 PROJECTS_ENV=libbeat filebeat metricbeat
 PYTHON_ENV?=$(BUILD_DIR)/python-env
diff --git a/NOTICE.txt b/NOTICE.txt
index b28dc94889b..4ca322a3594 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -346,6 +346,65 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+--------------------------------------------------------------------
+Dependency: github.com/antlr/antlr4
+Revision: 225249fdaef5
+License type (autodetected): BSD-3-Clause
+./vendor/github.com/antlr/antlr4/LICENSE.txt:
+--------------------------------------------------------------------
+[The "BSD 3-clause license"]
+Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holder nor the names of its contributors
+    may be used to endorse or promote products derived from this software
+    without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=====
+
+MIT License for codepointat.js from https://git.io/codepointat
+MIT License for fromcodepoint.js from https://git.io/vDW1m
+
+Copyright Mathias Bynens
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
 --------------------------------------------------------------------
 Dependency: github.com/armon/go-socks5
 Revision: e75332964ef5
@@ -2926,167 +2985,888 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
---------------------------------------------------------------------
-Dependency: github.com/google/gofuzz
-Version: v1.0.0
-License type (autodetected): Apache-2.0
-./vendor/github.com/google/gofuzz/LICENSE:
---------------------------------------------------------------------
-Apache License 2.0
+--------------------------------------------------------------------
+Dependency: github.com/google/gofuzz
+Version: v1.0.0
+License type (autodetected): Apache-2.0
+./vendor/github.com/google/gofuzz/LICENSE:
+--------------------------------------------------------------------
+Apache License 2.0
+
+
+--------------------------------------------------------------------
+Dependency: github.com/google/gopacket
+Version: v1.1.18
+Revision: 0ad7f2610e34
+License type (autodetected): BSD-3-Clause
+./vendor/github.com/google/gopacket/LICENSE:
+--------------------------------------------------------------------
+Copyright (c) 2012 Google, Inc. All rights reserved.
+Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+  * Neither the name of Andreas Krennmair, Google, nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------
+Dependency: github.com/google/uuid
+Version: v1.1.2
+Revision: c2e93f3ae59f
+License type (autodetected): BSD-3-Clause
+./vendor/github.com/google/uuid/LICENSE:
+--------------------------------------------------------------------
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+  * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------
+Dependency: github.com/googleapis/gax-go/v2
+Version: v2.0.5
+License type (autodetected): BSD-3-Clause
+./vendor/github.com/googleapis/gax-go/v2/LICENSE:
+--------------------------------------------------------------------
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+  * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------
+Dependency: github.com/googleapis/gnostic
+Version: v0.3.1
+Revision: 25d8b0b66985
+License type (autodetected): Apache-2.0
+./vendor/github.com/googleapis/gnostic/LICENSE:
+--------------------------------------------------------------------
+Apache License 2.0
+
+
+--------------------------------------------------------------------
+Dependency: github.com/gorhill/cronexpr
+Revision: d520615e531a
+License type (autodetected): Apache-2.0
+./vendor/github.com/gorhill/cronexpr/APLv2:
+--------------------------------------------------------------------
+Apache License 2.0
+
+
+--------------------------------------------------------------------
+Dependency: github.com/gorilla/websocket
+Version: v1.4.1
+License type (autodetected): BSD-2-Clause
+./vendor/github.com/gorilla/websocket/LICENSE:
+--------------------------------------------------------------------
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+  Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------
+Dependency: github.com/hashicorp/errwrap
+Revision: 7554cd9344ce
+License type (autodetected): MPL-2.0
+./vendor/github.com/hashicorp/errwrap/LICENSE:
+--------------------------------------------------------------------
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+     means any patent claim(s), including without limitation, method, process,
+     and apparatus claims, in any patent Licensable by such Contributor that
+     would be infringed, but for the grant of the License, by the making,
+     using, selling, offering for sale, having made, import, or transfer of
+     either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+     means either the GNU General Public License, Version 2.0, the GNU Lesser
+     General Public License, Version 2.1, the GNU Affero General Public
+     License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+     means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+     means an individual or a legal entity exercising rights under this
+     License. For legal entities, “You” includes any entity that controls, is
+     controlled by, or is under common control with You. For purposes of this
+     definition, “control” means (a) the power, direct or indirect, to cause
+     the direction or management of such entity, whether by contract or
+     otherwise, or (b) ownership of more than fifty percent (50%) of the
+     outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2.
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + + +-------------------------------------------------------------------- +Dependency: github.com/hashicorp/go-multierror +Revision: ed905158d874 +License type (autodetected): MPL-2.0 +./vendor/github.com/hashicorp/go-multierror/LICENSE: +-------------------------------------------------------------------- +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. +6. 
Disclaimer of Warranty --------------------------------------------------------------------- -Dependency: github.com/google/gopacket -Version: v1.1.18 -Revision: 0ad7f2610e34 -License type (autodetected): BSD-3-Clause -./vendor/github.com/google/gopacket/LICENSE: --------------------------------------------------------------------- -Copyright (c) 2012 Google, Inc. All rights reserved. -Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +7. Limitation of Liability - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Andreas Krennmair, Google, nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +8. Litigation --------------------------------------------------------------------- -Dependency: github.com/google/uuid -Version: v1.1.2 -Revision: c2e93f3ae59f -License type (autodetected): BSD-3-Clause -./vendor/github.com/google/uuid/LICENSE: --------------------------------------------------------------------- -Copyright (c) 2009,2014 Google Inc. All rights reserved. + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +9. Miscellaneous - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------- -Dependency: github.com/googleapis/gax-go/v2 -Version: v2.0.5 -License type (autodetected): BSD-3-Clause -./vendor/github.com/googleapis/gax-go/v2/LICENSE: --------------------------------------------------------------------- -Copyright 2016, Google Inc. -All rights reserved. 
-Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +10. Versions of the License - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +10.1. New Versions -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. --------------------------------------------------------------------- -Dependency: github.com/googleapis/gnostic -Version: v0.3.1 -Revision: 25d8b0b66985 -License type (autodetected): Apache-2.0 -./vendor/github.com/googleapis/gnostic/LICENSE: --------------------------------------------------------------------- -Apache License 2.0 +10.2. Effect of New Versions + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. --------------------------------------------------------------------- -Dependency: github.com/gorhill/cronexpr -Revision: d520615e531a -License type (autodetected): Apache-2.0 -./vendor/github.com/gorhill/cronexpr/APLv2: --------------------------------------------------------------------- -Apache License 2.0 +10.3. Modified Versions + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). --------------------------------------------------------------------- -Dependency: github.com/gorilla/websocket -Version: v1.4.1 -License type (autodetected): BSD-2-Clause -./vendor/github.com/gorilla/websocket/LICENSE: --------------------------------------------------------------------- -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +Exhibit A - Source Code Form License Notice - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. -------------------------------------------------------------------- Dependency: github.com/hashicorp/go-uuid @@ -5452,6 +6232,15 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/oklog/ulid +Version: v1.3.1 +License type (autodetected): Apache-2.0 +./vendor/github.com/oklog/ulid/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/opencontainers/go-digest Version: v1.0.0 @@ -6118,8 +6907,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------- Dependency: github.com/stretchr/objx -Version: v0.1.2 -Revision: b8b73a35e983 +Version: v0.2.0 License type (autodetected): MIT ./vendor/github.com/stretchr/objx/LICENSE: -------------------------------------------------------------------- @@ -6243,6 +7031,33 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/urso/diag +Revision: 21b3cc8eb797 +License type (autodetected): Apache-2.0 +./vendor/github.com/urso/diag/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/urso/diag-ecs +Revision: ab085841dcb9 +License type (autodetected): Apache-2.0 +./vendor/github.com/urso/diag-ecs/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/urso/ecslog +Version: v0.0.1 +License type (autodetected): Apache-2.0 +./vendor/github.com/urso/ecslog/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/urso/go-bin Revision: 781c575c9f0e @@ -6254,13 +7069,22 @@ Apache License 2.0 -------------------------------------------------------------------- Dependency: github.com/urso/magetools -Revision: 61080ed7b22b +Revision: c2e338f92f3a License type (autodetected): Apache-2.0 ./vendor/github.com/urso/magetools/LICENSE: -------------------------------------------------------------------- Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/urso/sderr +Revision: c2a16f3d43ec +License type (autodetected): Apache-2.0 +./vendor/github.com/urso/sderr/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/vmware/govmomi Revision: 2cad15190b41 @@ -7084,7 +7908,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: gopkg.in/yaml.v2 -Version: v2.2.7 +Version: v2.2.8 License type (autodetected): Apache-2.0 ./vendor/gopkg.in/yaml.v2/LICENSE: -------------------------------------------------------------------- @@ -7107,7 +7931,7 @@ limitations under the License. 
-------------------------------------------------------------------- Dependency: gopkg.in/yaml.v2 -Version: v2.2.7 +Version: v2.2.8 License type (autodetected): MIT ./vendor/gopkg.in/yaml.v2/LICENSE.libyaml: -------------------------------------------------------------------- diff --git a/filebeat/filebeat.spec b/filebeat/filebeat.spec new file mode 100644 index 00000000000..76c08d472c7 --- /dev/null +++ b/filebeat/filebeat.spec @@ -0,0 +1 @@ +{"BinaryPath":"filebeat","Args":["-e"],"Configurable":"grpc"} diff --git a/go.mod b/go.mod index d49fb8a0ebe..c2be718d89c 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc github.com/akavel/rsrc v0.8.0 // indirect github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 + github.com/antlr/antlr4 v0.0.0-20200225173536-225249fdaef5 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/aws/aws-lambda-go v1.6.0 github.com/aws/aws-sdk-go-v2 v0.9.0 @@ -82,12 +83,14 @@ require ( github.com/golang/protobuf v1.3.2 github.com/golang/snappy v0.0.1 github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9 + github.com/google/go-cmp v0.4.0 github.com/google/gopacket v1.1.18-0.20191009163724-0ad7f2610e34 github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f // indirect github.com/googleapis/gnostic v0.3.1-0.20190624222214-25d8b0b66985 // indirect github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a github.com/gorilla/mux v1.7.2 // indirect github.com/gorilla/websocket v1.4.1 // indirect + github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d // indirect github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2 github.com/jcmturner/gofork v1.0.0 // indirect @@ -110,6 +113,7 @@ require ( github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 github.com/mitchellh/mapstructure v1.1.2 github.com/morikuni/aec v1.0.0 // indirect + github.com/oklog/ulid v1.3.1 github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483 // indirect github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect github.com/pierrre/gotestcover v0.0.0-20160113212533-7b94f124d338 @@ -127,11 +131,10 @@ require ( github.com/shirou/gopsutil v2.19.11+incompatible github.com/spf13/cobra v0.0.3 github.com/spf13/pflag v1.0.3 - github.com/stretchr/objx v0.1.2-0.20180702103455-b8b73a35e983 // indirect github.com/stretchr/testify v1.4.0 github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b github.com/tsg/gopacket v0.0.0-20190320122513-dd3d0e41124a - github.com/urso/magetools v0.0.0-20200106130147-61080ed7b22b // indirect + github.com/urso/ecslog v0.0.1 github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 // indirect @@ -149,10 +152,11 @@ require ( golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 google.golang.org/api v0.15.0 google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb + google.golang.org/grpc v1.27.1 gopkg.in/inf.v0 v0.9.0 gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 - gopkg.in/yaml.v2 v2.2.7 + gopkg.in/yaml.v2 v2.2.8 howett.net/plist v0.0.0-20181124034731-591f970eefbb k8s.io/api v0.0.0-20190722141453-b90922c02518 k8s.io/apimachinery v0.0.0-20190719140911-bfcf53abc9f8 diff --git a/go.sum b/go.sum index 
d95286d6c56..9ba3979f1e6 100644 --- a/go.sum +++ b/go.sum @@ -98,6 +98,7 @@ github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc h github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -106,6 +107,9 @@ github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2Z github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg= github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 h1:WFwa9pqou0Nb4DdfBOyaBTH0GqLE74Qwdf61E7ITHwQ= github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43/go.mod h1:tJPYQG4mnMeUtQvQKNkbsFrnmZOg59Qnf8CcctFv5v4= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antlr/antlr4 v0.0.0-20200225173536-225249fdaef5 h1:nkZ9axP+MvUFCu8JRN/MCY+DmTfs6lY7hE0QnJbxSdI= +github.com/antlr/antlr4 v0.0.0-20200225173536-225249fdaef5/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-lambda-go v1.6.0 h1:T+u/g79zPKw1oJM7xYhvpq7i4Sjc0iVsXZUaqRVVSOg= @@ -165,6 +169,7 @@ github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQa github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -248,12 +253,14 @@ github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 h1:rSo6gsz4zOanqtJ5 github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.5.0 h1:vBh+kQp8lg9XPr56u1CPrWjFXtdphMoGWVHr9/1c+A0= github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= @@ -261,6 +268,7 @@ github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60 github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24 h1:nREVDi4H8mwnNqfxFU9NMzZrDCg8TXbEatMvHozxKwU= github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= @@ -371,7 +379,9 @@ github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 h1:cAv7ZbSmyb1wjn6T4TIiyFCkpcfgpbcNNC3bM2srLaI= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -393,6 +403,7 @@ github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ -423,6 +434,7 @@ github.com/jtolds/gls v4.20.0+incompatible 
h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/justinas/nosurf v1.1.0/go.mod h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -437,6 +449,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -470,6 +483,7 @@ github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 h1:qdHlMllk/PTLUrX3XdtXDrLL1lPSfcqUmJD1eYfbapg= @@ -490,6 +504,8 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.5.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -518,6 +534,7 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/pelletier/go-buffruneio v0.2.0/go.mod 
h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -569,6 +586,7 @@ github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAa github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522 h1:39BJIaZIhIBmXATIhdlTBlTQpAiGXHnz17CrO7vF2Ss= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -589,10 +607,11 @@ github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bd github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.2-0.20180702103455-b8b73a35e983 h1:4s04gnPlcop3dmAHjOAHWa6gX7Dg7h0gh81gr3GwzIk= -github.com/stretchr/objx v0.1.2-0.20180702103455-b8b73a35e983/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -604,18 +623,28 @@ github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b/go.mod h1:jAqhj/JBVC github.com/tsg/gopacket v0.0.0-20190320122513-dd3d0e41124a h1:vVmCas8T0lbxAI1GuQO45L0o/OrWJSXtiK6vH27Qspg= github.com/tsg/gopacket v0.0.0-20190320122513-dd3d0e41124a/go.mod h1:RIkfovP3Y7my19aXEjjbNd9E5TlHozzAyt7B8AaEcwg= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urso/diag v0.0.0-20200125202105-ffdc32ff5518/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/diag-ecs v0.0.0-20200210114345-ab085841dcb9 h1:GzsakegdLNhw0mF2fcFd+BgdY8owV+4Y+6MvbgRXIWg= +github.com/urso/diag-ecs v0.0.0-20200210114345-ab085841dcb9/go.mod h1:+1Ug5A104KCMD8ZZ4YarKGNSt8ANJWA7nWqji37BmrQ= +github.com/urso/ecslog v0.0.1 
h1:Candk+au3CbAT5SpVMhTE3VIq0r09UJely7vUzVAfF0= +github.com/urso/ecslog v0.0.1/go.mod h1:wky/kxUyw0VJw/HlXal+7oTT2YDU1KAWrxbuKRFblEI= github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e h1:NiofbjIUI5gR+ybDsGSVH1fWyjSeDYiYVJHT1+kcsak= github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e/go.mod h1:6GfHrdWBQYjFRIznu7XuQH4lYB2w8nO4bnImVKkzPOM= github.com/urso/magetools v0.0.0-20190919040553-290c89e0c230/go.mod h1:DFxTNgS/ExCGmmjVjSOgS2WjtfjKXgCyDzAFgbtovSA= -github.com/urso/magetools v0.0.0-20200106130147-61080ed7b22b h1:eRYRTx+2CteM4P2U+VgAeAmuMAyB/QAGxWtgH7/o4l8= -github.com/urso/magetools v0.0.0-20200106130147-61080ed7b22b/go.mod h1:DFxTNgS/ExCGmmjVjSOgS2WjtfjKXgCyDzAFgbtovSA= +github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a h1:jWAaRFnay3H2e6S0GGCl5nKrkgQNlarCE/kvcutzBmw= +github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a/go.mod h1:DbaJnRzkGaWrMWm5Hz6QVnUj//x9/zjrfx8bF3J+GJY= github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4 h1:hhA8EBThzz9PztawVTycKvfETVuBqxAQ5keFlAVtbAw= github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4/go.mod h1:RspW+E2Yb7Fs7HclB2tiDaiu6Rp41BiIG4Wo1YaoXGc= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec h1:HkZIDJrMKZHPsYhmH2XjTTSk1pbMCFfpxSnyzZUFm+k= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41 h1:NeNpIvfvaFOh0BH7nMEljE5Rk/VJlxhm58M41SeOD20= github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xanzy/go-gitlab v0.22.3 h1:/rNlZ2hquUWNc6rJdntVM03tEOoTmnZ1lcNyJCl0WlU= github.com/xanzy/go-gitlab v0.22.3/go.mod h1:t4Bmvnxj7k37S4Y17lfLx+nLqkf/oQwT2HagfWKv5Og= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= @@ -644,10 +673,12 @@ go.uber.org/zap v1.7.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= 
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -692,6 +723,7 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -721,6 +753,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -732,6 +765,7 @@ golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -768,6 +802,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -815,6 +850,8 @@ 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -834,8 +871,12 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 h1:/saqWwm73dLmuzbNhe92F0QsZ/KiFND+esHco2v1hiY= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -843,6 +884,8 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index a903fcdc523..ed89ae91a9f 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -617,7 +617,7 @@ func (b *Beat) configure(settings Settings) error { logp.Info("Beat ID: %v", b.Info.ID) // initialize config manager - b.ConfigManager, err = management.Factory()(b.Config.Management, reload.Register, b.Beat.Info.ID) + b.ConfigManager, err = management.Factory(b.Config.Management)(b.Config.Management, reload.Register, b.Beat.Info.ID) if err != nil { return err } diff --git a/libbeat/management/management.go b/libbeat/management/management.go index fb6c89ba870..dc793fafe2d 100644 --- a/libbeat/management/management.go +++ 
b/libbeat/management/management.go @@ -31,6 +31,8 @@ var Namespace = "libbeat.management" // DebugK used as key for all things central management var DebugK = "centralmgmt" +var centralMgmtKey = "x-pack-cm" + // ConfigManager interacts with the beat to update configurations // from an external source type ConfigManager interface { @@ -47,32 +49,47 @@ type ConfigManager interface { CheckRawConfig(cfg *common.Config) error } +// PluginFunc for creating a FactoryFunc if it matches a config +type PluginFunc func(*common.Config) FactoryFunc + // FactoryFunc for creating a config manager type FactoryFunc func(*common.Config, *reload.Registry, uuid.UUID) (ConfigManager, error) // Register a config manager -func Register(name string, fn FactoryFunc, stability feature.Stability) { +func Register(name string, fn PluginFunc, stability feature.Stability) { f := feature.New(Namespace, name, fn, feature.MakeDetails(name, "", stability)) feature.MustRegister(f) } // Factory retrieves the config manager constructor. If none is registered // it will create a nil manager -func Factory() FactoryFunc { +func Factory(cfg *common.Config) FactoryFunc { factories, err := feature.GlobalRegistry().LookupAll(Namespace) if err != nil { return nilFactory } for _, f := range factories { - if factory, ok := f.Factory().(FactoryFunc); ok { - return factory + if plugin, ok := f.Factory().(PluginFunc); ok { + if factory := plugin(cfg); factory != nil { + return factory + } } } return nilFactory } +type modeConfig struct { + Mode string `config:"mode" yaml:"mode"` +} + +func defaultModeConfig() *modeConfig { + return &modeConfig{ + Mode: centralMgmtKey, + } +} + // nilManager, fallback when no manager is present type nilManager struct{} diff --git a/vendor/github.com/antlr/antlr4/LICENSE.txt b/vendor/github.com/antlr/antlr4/LICENSE.txt new file mode 100644 index 00000000000..2042d1bda6c --- /dev/null +++ b/vendor/github.com/antlr/antlr4/LICENSE.txt @@ -0,0 +1,52 @@ +[The "BSD 3-clause license"] +Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +===== + +MIT License for codepointat.js from https://git.io/codepointat +MIT License for fromcodepoint.js from https://git.io/vDW1m + +Copyright Mathias Bynens + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go new file mode 100644 index 00000000000..1592212e146 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNInvalidAltNumber int + +type ATN struct { + // DecisionToState is the decision points for all rules, subrules, optional + // blocks, ()+, ()*, etc. Used to build DFA predictors for them. + DecisionToState []DecisionState + + // grammarType is the ATN type and is used for deserializing ATNs from strings. + grammarType int + + // lexerActions is referenced by action transitions in the ATN for lexer ATNs. + lexerActions []LexerAction + + // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. + maxTokenType int + + modeNameToStartState map[string]*TokensStartState + + modeToStartState []*TokensStartState + + // ruleToStartState maps from rule index to starting state number. + ruleToStartState []*RuleStartState + + // ruleToStopState maps from rule index to stop state number. + ruleToStopState []*RuleStopState + + // ruleToTokenType maps the rule index to the resulting token type for lexer + // ATNs. For parser ATNs, it maps the rule index to the generated bypass token + // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was + // specified, and otherwise is nil. + ruleToTokenType []int + + states []ATNState +} + +func NewATN(grammarType int, maxTokenType int) *ATN { + return &ATN{ + grammarType: grammarType, + maxTokenType: maxTokenType, + modeNameToStartState: make(map[string]*TokensStartState), + } +} + +// NextTokensInContext computes the set of valid tokens that can occur starting +// in state s. If ctx is nil, the set of tokens will not include what can follow +// the rule surrounding s. In other words, the set will be restricted to tokens +// reachable staying within the rule of s. 
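+//
+// A minimal usage sketch (an illustration with assumed names, not part of
+// the upstream sources): given a deserialized ATN, the follow set of a
+// state s can be computed with or without its surrounding call stack:
+//
+//	within := atn.NextTokens(s, nil) // restricted to the rule containing s
+//	follow := atn.NextTokens(s, ctx) // also follows the invocation chain in ctx
+//
+// where atn, s, and ctx are assumed to be supplied by the generated
+// recognizer.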
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { + return NewLL1Analyzer(a).Look(s, nil, ctx) +} + +// NextTokensNoContext computes the set of valid tokens that can occur starting +// in s and staying in same rule. Token.EPSILON is in set if we reach end of +// rule. +func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { + if s.GetNextTokenWithinRule() != nil { + return s.GetNextTokenWithinRule() + } + + s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil)) + s.GetNextTokenWithinRule().readOnly = true + + return s.GetNextTokenWithinRule() +} + +func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { + if ctx == nil { + return a.NextTokensNoContext(s) + } + + return a.NextTokensInContext(s, ctx) +} + +func (a *ATN) addState(state ATNState) { + if state != nil { + state.SetATN(a) + state.SetStateNumber(len(a.states)) + } + + a.states = append(a.states, state) +} + +func (a *ATN) removeState(state ATNState) { + a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice +} + +func (a *ATN) defineDecisionState(s DecisionState) int { + a.DecisionToState = append(a.DecisionToState, s) + s.setDecision(len(a.DecisionToState) - 1) + + return s.getDecision() +} + +func (a *ATN) getDecisionState(decision int) DecisionState { + if len(a.DecisionToState) == 0 { + return nil + } + + return a.DecisionToState[decision] +} + +// getExpectedTokens computes the set of input symbols which could follow ATN +// state number stateNumber in the specified full parse context ctx and returns +// the set of potentially valid input symbols which could follow the specified +// state in the specified context. This method considers the complete parser +// context, but does not evaluate semantic predicates (i.e. all predicates +// encountered during the calculation are assumed true). If a path in the ATN +// exists from the starting state to the RuleStopState of the outermost context +// without Matching any symbols, Token.EOF is added to the returned set. +// +// A nil ctx defaults to ParserRuleContext.EMPTY. +// +// It panics if the ATN does not contain state stateNumber. +func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(a.states) { + panic("Invalid state number.") + } + + s := a.states[stateNumber] + following := a.NextTokens(s, nil) + + if !following.contains(TokenEpsilon) { + return following + } + + expected := NewIntervalSet() + + expected.addSet(following) + expected.removeOne(TokenEpsilon) + + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := a.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + + following = a.NextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.GetParent().(RuleContext) + } + + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + + return expected +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go new file mode 100644 index 00000000000..0535d5246c5 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go @@ -0,0 +1,295 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" +) + +type comparable interface { + equals(other interface{}) bool +} + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive at the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. +type ATNConfig interface { + comparable + + hash() int + + GetState() ATNState + GetAlt() int + GetSemanticContext() SemanticContext + + GetContext() PredictionContext + SetContext(PredictionContext) + + GetReachesIntoOuterContext() int + SetReachesIntoOuterContext(int) + + String() string + + getPrecedenceFilterSuppressed() bool + setPrecedenceFilterSuppressed(bool) +} + +type BaseATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int +} + +func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup + return &BaseATNConfig{ + state: old.state, + alt: old.alt, + context: old.context, + semanticContext: old.semanticContext, + reachesIntoOuterContext: old.reachesIntoOuterContext, + } +} + +func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig5(state, alt, context, SemanticContextNone) +} + +func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? + } + + return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} +} + +func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) +} + +func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) +} + +func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") + } + + return &BaseATNConfig{ + state: state, + alt: c.GetAlt(), + context: context, + semanticContext: semanticContext, + reachesIntoOuterContext: c.GetReachesIntoOuterContext(), + precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), + } +} + +func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { + return b.precedenceFilterSuppressed +} + +func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { + b.precedenceFilterSuppressed = v +} + +func (b *BaseATNConfig) GetState() ATNState { + return b.state +} + +func (b *BaseATNConfig) GetAlt() int { + return b.alt +} + +func (b *BaseATNConfig) SetContext(v PredictionContext) { + b.context = v +} +func (b *BaseATNConfig) GetContext() PredictionContext { + return b.context +} + +func (b *BaseATNConfig) GetSemanticContext() SemanticContext { + return b.semanticContext +} + +func (b *BaseATNConfig) GetReachesIntoOuterContext() int { + 
return b.reachesIntoOuterContext +} + +func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { + b.reachesIntoOuterContext = v +} + +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (b *BaseATNConfig) equals(o interface{}) bool { + if b == o { + return true + } + + var other, ok = o.(*BaseATNConfig) + + if !ok { + return false + } + + var equal bool + + if b.context == nil { + equal = other.context == nil + } else { + equal = b.context.equals(other.context) + } + + var ( + nums = b.state.GetStateNumber() == other.state.GetStateNumber() + alts = b.alt == other.alt + cons = b.semanticContext.equals(other.semanticContext) + sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +func (b *BaseATNConfig) hash() int { + var c int + if b.context != nil { + c = b.context.hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, b.state.GetStateNumber()) + h = murmurUpdate(h, b.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, b.semanticContext.hash()) + return murmurFinish(h, 4) +} + +func (b *BaseATNConfig) String() string { + var s1, s2, s3 string + + if b.context != nil { + s1 = ",[" + fmt.Sprint(b.context) + "]" + } + + if b.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(b.semanticContext) + } + + if b.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) +} + +type LexerATNConfig struct { + *BaseATNConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), + lexerActionExecutor: lexerActionExecutor, + } +} + +func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +func (l *LexerATNConfig) hash() int { + var f int + if l.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := 
murmurInit(7) + h = murmurUpdate(h, l.state.hash()) + h = murmurUpdate(h, l.alt) + h = murmurUpdate(h, l.context.hash()) + h = murmurUpdate(h, l.semanticContext.hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, l.lexerActionExecutor.hash()) + h = murmurFinish(h, 6) + return h +} + +func (l *LexerATNConfig) equals(other interface{}) bool { + var othert, ok = other.(*LexerATNConfig) + + if l == other { + return true + } else if !ok { + return false + } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { + return false + } + + var b bool + + if l.lexerActionExecutor != nil { + b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor) + } else { + b = othert.lexerActionExecutor != nil + } + + if b { + return false + } + + return l.BaseATNConfig.equals(othert.BaseATNConfig) +} + + +func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go new file mode 100644 index 00000000000..d9f74755fa7 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go @@ -0,0 +1,387 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +type ATNConfigSet interface { + hash() int + Add(ATNConfig, *DoubleDict) bool + AddAll([]ATNConfig) bool + + GetStates() *Set + GetPredicates() []SemanticContext + GetItems() []ATNConfig + + OptimizeConfigs(interpreter *BaseATNSimulator) + + Equals(other interface{}) bool + + Length() int + IsEmpty() bool + Contains(ATNConfig) bool + ContainsFast(ATNConfig) bool + Clear() + String() string + + HasSemanticContext() bool + SetHasSemanticContext(v bool) + + ReadOnly() bool + SetReadOnly(bool) + + GetConflictingAlts() *BitSet + SetConflictingAlts(*BitSet) + + FullContext() bool + + GetUniqueAlt() int + SetUniqueAlt(int) + + GetDipsIntoOuterContext() bool + SetDipsIntoOuterContext(bool) +} + +// BaseATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type BaseATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two BaseATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. + configLookup *Set + + // configs is the added elements. + configs []ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves recomputation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. 
It is a wildcard with SLL, but not for an LL + context merge. + fullCtx bool + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from this set. + hasSemanticContext bool + + // readOnly is whether it is read-only. Do not + // allow any code to manipulate the set if true because DFA states will point at + // sets and those must not change. It does not protect other fields; conflictingAlts + // in particular is assigned after readOnly. + readOnly bool + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves recomputation. Can we track conflicts as they + // are added to save scanning configs later? + uniqueAlt int +} + +func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { + return &BaseATNConfigSet{ + cachedHash: -1, + configLookup: NewSet(nil, equalATNConfigs), + fullCtx: fullCtx, + } +} + +// Add merges contexts with existing configs for (s, i, pi, _), where s is the +// ATNConfig.state, i is the ATNConfig.alt, and pi is the +// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates +// dipsIntoOuterContext and hasSemanticContext when necessary. +func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { + if b.readOnly { + panic("set is read-only") + } + + if config.GetSemanticContext() != SemanticContextNone { + b.hasSemanticContext = true + } + + if config.GetReachesIntoOuterContext() > 0 { + b.dipsIntoOuterContext = true + } + + existing := b.configLookup.add(config).(ATNConfig) + + if existing == config { + b.cachedHash = -1 + b.configs = append(b.configs, config) // Track order here + + return true + } + + // Merge a previous (s, i, pi, _) with it and save the result + rootIsWildcard := !b.fullCtx + merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) + + // No need to check for existing.context because config.context is in the cache, + // since the only way to create new graphs is the "call rule" and here. We cache + // at both places.
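+ // A sketch of what this merge buys (a hedged reading of the surrounding
+ // code, not upstream commentary): if the set already holds (s, i, Γ1, pi)
+ // and config arrives as (s, i, Γ2, pi), merged is a single graph-structured
+ // context covering both Γ1 and Γ2, so the existing entry stands in for both
+ // derivations rather than a second entry being appended.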
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +func (b *BaseATNConfigSet) GetStates() *Set { + states := NewSet(nil, nil) + + for i := 0; i < len(b.configs); i++ { + states.add(b.configs[i].GetState()) + } + + return states +} + +func (b *BaseATNConfigSet) HasSemanticContext() bool { + return b.hasSemanticContext +} + +func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { + b.hasSemanticContext = v +} + +func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { + preds := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + preds = append(preds, c) + } + } + + return preds +} + +func (b *BaseATNConfigSet) GetItems() []ATNConfig { + return b.configs +} + +func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + if b.configLookup.length() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +func (b *BaseATNConfigSet) Equals(other interface{}) bool { + if b == other { + return true + } else if _, ok := other.(*BaseATNConfigSet); !ok { + return false + } + + other2 := other.(*BaseATNConfigSet) + + return b.configs != nil && + // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary? 
+ b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + b.conflictingAlts == other2.conflictingAlts && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext +} + +func (b *BaseATNConfigSet) hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *BaseATNConfigSet) hashCodeConfigs() int { + h := murmurInit(1) + for _, c := range b.configs { + if c != nil { + h = murmurUpdate(h, c.hash()) + } + } + return murmurFinish(h, len(b.configs)) +} + +func (b *BaseATNConfigSet) Length() int { + return len(b.configs) +} + +func (b *BaseATNConfigSet) IsEmpty() bool { + return len(b.configs) == 0 +} + +func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.contains(item) +} + +func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set +} + +func (b *BaseATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + + b.configs = make([]ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewSet(nil, equalATNConfigs) +} + +func (b *BaseATNConfigSet) FullContext() bool { + return b.fullCtx +} + +func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { + return b.dipsIntoOuterContext +} + +func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { + b.dipsIntoOuterContext = v +} + +func (b *BaseATNConfigSet) GetUniqueAlt() int { + return b.uniqueAlt +} + +func (b *BaseATNConfigSet) SetUniqueAlt(v int) { + b.uniqueAlt = v +} + +func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { + return b.conflictingAlts +} + +func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { + b.conflictingAlts = v +} + +func (b *BaseATNConfigSet) ReadOnly() bool { + return b.readOnly +} + +func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { + b.readOnly = readOnly + + if readOnly { + b.configLookup = nil // Read only, so no need for the lookup cache + } +} + +func (b *BaseATNConfigSet) String() string { + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +type OrderedATNConfigSet struct { + *BaseATNConfigSet +} + +func NewOrderedATNConfigSet() *OrderedATNConfigSet { + b := NewBaseATNConfigSet(false) + + b.configLookup = NewSet(nil, nil) + + return &OrderedATNConfigSet{BaseATNConfigSet: b} +} + +func equalATNConfigs(a, b interface{}) bool { + if a == nil || b == nil { + return false + } + + if a == b { + return true + } + + var ai, ok = a.(ATNConfig) + var bi, ok1 = b.(ATNConfig) + + if !ok || !ok1 { + return false + } + + nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber() + alts := ai.GetAlt() == bi.GetAlt() + cons := ai.GetSemanticContext().equals(bi.GetSemanticContext()) + + return nums && alts && cons +} diff --git 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go new file mode 100644 index 00000000000..18b89efafb2 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go @@ -0,0 +1,25 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false} + +type ATNDeserializationOptions struct { + readOnly bool + verifyATN bool + generateRuleBypassTransitions bool +} + +func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions { + o := new(ATNDeserializationOptions) + + if CopyFrom != nil { + o.readOnly = CopyFrom.readOnly + o.verifyATN = CopyFrom.verifyATN + o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions + } + + return o +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go new file mode 100644 index 00000000000..884d39cf7cd --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go @@ -0,0 +1,828 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "encoding/hex" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +// This is the earliest supported serialized UUID. +// stick to serialized version for now, we don't need a UUID instance +var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E" +var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089" + +// This list contains all of the currently supported UUIDs, ordered by when +// the feature first appeared in this branch. +var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP} + +var SerializedVersion = 3 + +// This is the current serialized UUID. +var SerializedUUID = AddedUnicodeSMP + +type LoopEndStateIntPair struct { + item0 *LoopEndState + item1 int +} + +type BlockStartStateIntPair struct { + item0 BlockStartState + item1 int +} + +type ATNDeserializer struct { + deserializationOptions *ATNDeserializationOptions + data []rune + pos int + uuid string +} + +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { + if options == nil { + options = ATNDeserializationOptionsdefaultOptions + } + + return &ATNDeserializer{deserializationOptions: options} +} + +func stringInSlice(a string, list []string) int { + for i, b := range list { + if b == a { + return i + } + } + + return -1 +} + +// isFeatureSupported determines if a particular serialized representation of an +// ATN supports a particular feature, identified by the UUID used for +// serializing the ATN at the time the feature was first introduced. Feature is +// the UUID marking the first time the feature was supported in the serialized +// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently +// being deserialized. It returns true if actualUuid represents a serialized ATN +// at or after the feature identified by feature was introduced, and otherwise +// false. 
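+//
+// For example, with the two UUIDs declared above:
+// isFeatureSupported(AddedUnicodeSMP, BaseSerializedUUID) reports false,
+// since such a serialized ATN predates the Unicode SMP feature, while
+// isFeatureSupported(AddedUnicodeSMP, AddedUnicodeSMP) reports true; the
+// check reduces to comparing indices in SupportedUUIDs.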
+func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool { + idx1 := stringInSlice(feature, SupportedUUIDs) + + if idx1 < 0 { + return false + } + + idx2 := stringInSlice(actualUUID, SupportedUUIDs) + + return idx2 >= idx1 +} + +func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN { + a.reset(utf16.Decode(data)) + a.checkVersion() + a.checkUUID() + + atn := a.readATN() + + a.readStates(atn) + a.readRules(atn) + a.readModes(atn) + + sets := make([]*IntervalSet, 0) + + // First, deserialize sets with 16-bit arguments <= U+FFFF. + sets = a.readSets(atn, sets, a.readInt) + // Next, if the ATN was serialized with the Unicode SMP feature, + // deserialize sets with 32-bit arguments <= U+10FFFF. + if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) { + sets = a.readSets(atn, sets, a.readInt32) + } + + a.readEdges(atn, sets) + a.readDecisions(atn) + a.readLexerActions(atn) + a.markPrecedenceDecisions(atn) + a.verifyATN(atn) + + if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser { + a.generateRuleBypassTransitions(atn) + // Re-verify after modification + a.verifyATN(atn) + } + + return atn + +} + +func (a *ATNDeserializer) reset(data []rune) { + temp := make([]rune, len(data)) + + for i, c := range data { + // Don't adjust the first value since that's the version number + if i == 0 { + temp[i] = c + } else if c > 1 { + temp[i] = c - 2 + } else { + temp[i] = c + 65533 + } + } + + a.data = temp + a.pos = 0 +} + +func (a *ATNDeserializer) checkVersion() { + version := a.readInt() + + if version != SerializedVersion { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").") + } +} + +func (a *ATNDeserializer) checkUUID() { + uuid := a.readUUID() + + if stringInSlice(uuid, SupportedUUIDs) < 0 { + panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).") + } + + a.uuid = uuid +} + +func (a *ATNDeserializer) readATN() *ATN { + grammarType := a.readInt() + maxTokenType := a.readInt() + + return NewATN(grammarType, maxTokenType) +} + +func (a *ATNDeserializer) readStates(atn *ATN) { + loopBackStateNumbers := make([]LoopEndStateIntPair, 0) + endStateNumbers := make([]BlockStartStateIntPair, 0) + + nstates := a.readInt() + + for i := 0; i < nstates; i++ { + stype := a.readInt() + + // Ignore bad types of states + if stype == ATNStateInvalidType { + atn.addState(nil) + + continue + } + + ruleIndex := a.readInt() + + if ruleIndex == 0xFFFF { + ruleIndex = -1 + } + + s := a.stateFactory(stype, ruleIndex) + + if stype == ATNStateLoopEnd { + loopBackStateNumber := a.readInt() + + loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) + } else if s2, ok := s.(BlockStartState); ok { + endStateNumber := a.readInt() + + endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber}) + } + + atn.addState(s) + } + + // Delay the assignment of loop back and end states until we know all the state + // instances have been initialized + for j := 0; j < len(loopBackStateNumbers); j++ { + pair := loopBackStateNumbers[j] + + pair.item0.loopBackState = atn.states[pair.item1] + } + + for j := 0; j < len(endStateNumbers); j++ { + pair := endStateNumbers[j] + + pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) + } + + numNonGreedyStates := a.readInt() + + for j := 0; j < numNonGreedyStates; j++ { + stateNumber := a.readInt() + + 
atn.states[stateNumber].(DecisionState).setNonGreedy(true) + } + + numPrecedenceStates := a.readInt() + + for j := 0; j < numPrecedenceStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true + } +} + +func (a *ATNDeserializer) readRules(atn *ATN) { + nrules := a.readInt() + + if atn.grammarType == ATNTypeLexer { + atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0) + } + + atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0) + + for i := 0; i < nrules; i++ { + s := a.readInt() + startState := atn.states[s].(*RuleStartState) + + atn.ruleToStartState[i] = startState + + if atn.grammarType == ATNTypeLexer { + tokenType := a.readInt() + + if tokenType == 0xFFFF { + tokenType = TokenEOF + } + + atn.ruleToTokenType[i] = tokenType + } + } + + atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0) + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if s2, ok := state.(*RuleStopState); ok { + atn.ruleToStopState[s2.ruleIndex] = s2 + atn.ruleToStartState[s2.ruleIndex].stopState = s2 + } + } +} + +func (a *ATNDeserializer) readModes(atn *ATN) { + nmodes := a.readInt() + + for i := 0; i < nmodes; i++ { + s := a.readInt() + + atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState)) + } +} + +func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet { + m := a.readInt() + + for i := 0; i < m; i++ { + iset := NewIntervalSet() + + sets = append(sets, iset) + + n := a.readInt() + containsEOF := a.readInt() + + if containsEOF != 0 { + iset.addOne(-1) + } + + for j := 0; j < n; j++ { + i1 := readUnicode() + i2 := readUnicode() + + iset.addRange(i1, i2) + } + } + + return sets +} + +func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { + nedges := a.readInt() + + for i := 0; i < nedges; i++ { + var ( + src = a.readInt() + trg = a.readInt() + ttype = a.readInt() + arg1 = a.readInt() + arg2 = a.readInt() + arg3 = a.readInt() + trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + ) + + srcState.AddTransition(trans, -1) + } + + // Edges for rule stop states can be derived, so they are not serialized + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + for j := 0; j < len(state.GetTransitions()); j++ { + var t, ok = state.GetTransitions()[j].(*RuleTransition) + + if !ok { + continue + } + + outermostPrecedenceReturn := -1 + + if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule { + if t.precedence == 0 { + outermostPrecedenceReturn = t.getTarget().GetRuleIndex() + } + } + + trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn) + + atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1) + } + } + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if s2, ok := state.(*BaseBlockStartState); ok { + // We need to know the end state to set its start state + if s2.endState == nil { + panic("IllegalState") + } + + // Block end states can only be associated to a single block start state + if s2.endState.startState != nil { + panic("IllegalState") + } + + s2.endState.startState = state + } + + if s2, ok := state.(*PlusLoopbackState); ok { + for j := 0; j < len(s2.GetTransitions()); j++ { + target := s2.GetTransitions()[j].getTarget() + + if t2, ok := target.(*PlusBlockStartState); ok { + t2.loopBackState = state + } + } + } else if s2, ok := 
state.(*StarLoopbackState); ok { + for j := 0; j < len(s2.GetTransitions()); j++ { + target := s2.GetTransitions()[j].getTarget() + + if t2, ok := target.(*StarLoopEntryState); ok { + t2.loopBackState = state + } + } + } + } +} + +func (a *ATNDeserializer) readDecisions(atn *ATN) { + ndecisions := a.readInt() + + for i := 0; i < ndecisions; i++ { + s := a.readInt() + decState := atn.states[s].(DecisionState) + + atn.DecisionToState = append(atn.DecisionToState, decState) + decState.setDecision(i) + } +} + +func (a *ATNDeserializer) readLexerActions(atn *ATN) { + if atn.grammarType == ATNTypeLexer { + count := a.readInt() + + atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil) + + for i := 0; i < count; i++ { + actionType := a.readInt() + data1 := a.readInt() + + if data1 == 0xFFFF { + data1 = -1 + } + + data2 := a.readInt() + + if data2 == 0xFFFF { + data2 = -1 + } + + lexerAction := a.lexerActionFactory(actionType, data1, data2) + + atn.lexerActions[i] = lexerAction + } + } +} + +func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { + count := len(atn.ruleToStartState) + + for i := 0; i < count; i++ { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + } + + for i := 0; i < count; i++ { + a.generateRuleBypassTransition(atn, i) + } +} + +func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { + bypassStart := NewBasicBlockStartState() + + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop := NewBlockEndState() + + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + + atn.defineDecisionState(bypassStart.BaseDecisionState) + + bypassStop.startState = bypassStart + + var excludeTransition Transition + var endState ATNState + + if atn.ruleToStartState[idx].isPrecedenceRule { + // Wrap from the beginning of the rule to the StarLoopEntryState + endState = nil + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if a.stateIsEndStateFor(state, idx) != nil { + endState = state + excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] + + break + } + } + + if excludeTransition == nil { + panic("Couldn't identify final state of the precedence rule prefix section.") + } + } else { + endState = atn.ruleToStopState[idx] + } + + // All non-excluded transitions that currently target end state need to target + // blockEnd instead + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + for j := 0; j < len(state.GetTransitions()); j++ { + transition := state.GetTransitions()[j] + + if transition == excludeTransition { + continue + } + + if transition.getTarget() == endState { + transition.setTarget(bypassStop) + } + } + } + + // All transitions leaving the rule start state need to leave blockStart instead + ruleToStartState := atn.ruleToStartState[idx] + count := len(ruleToStartState.GetTransitions()) + + for count > 0 { + bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) + ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) + } + + // Link the new states + atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) + bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) + + MatchState := NewBasicState() + + atn.addState(MatchState) + MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) + bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) +} + +func (a 
*ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { + if state.GetRuleIndex() != idx { + return nil + } + + if _, ok := state.(*StarLoopEntryState); !ok { + return nil + } + + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if _, ok := maybeLoopEndState.(*LoopEndState); !ok { + return nil + } + + var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { + return state + } + + return nil +} + +// markPrecedenceDecisions analyzes the StarLoopEntryState states in the +// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to +// the correct value. +func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { + for _, state := range atn.states { + if _, ok := state.(*StarLoopEntryState); !ok { + continue + } + + // We analyze the ATN to determine if a ATN decision state is the + // decision for the closure block that determines whether a + // precedence rule should continue or complete. + if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if s3.epsilonOnlyTransitions && ok2 { + state.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } + } +} + +func (a *ATNDeserializer) verifyATN(atn *ATN) { + if !a.deserializationOptions.verifyATN { + return + } + + // Verify assumptions + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if state == nil { + continue + } + + a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") + + switch s2 := state.(type) { + case *PlusBlockStartState: + a.checkCondition(s2.loopBackState != nil, "") + + case *StarLoopEntryState: + a.checkCondition(s2.loopBackState != nil, "") + a.checkCondition(len(s2.GetTransitions()) == 2, "") + + switch s2 := state.(type) { + case *StarBlockStartState: + var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState) + + a.checkCondition(ok2, "") + a.checkCondition(!s2.nonGreedy, "") + + case *LoopEndState: + var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState) + + a.checkCondition(ok2, "") + a.checkCondition(s3.nonGreedy, "") + + default: + panic("IllegalState") + } + + case *StarLoopbackState: + a.checkCondition(len(state.GetTransitions()) == 1, "") + + var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) + + a.checkCondition(ok2, "") + + case *LoopEndState: + a.checkCondition(s2.loopBackState != nil, "") + + case *RuleStartState: + a.checkCondition(s2.stopState != nil, "") + + case *BaseBlockStartState: + a.checkCondition(s2.endState != nil, "") + + case *BlockEndState: + a.checkCondition(s2.startState != nil, "") + + case DecisionState: + a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") + + default: + var _, ok = s2.(*RuleStopState) + + a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") + } + } +} + +func (a *ATNDeserializer) checkCondition(condition bool, message string) { + if !condition { + if message == "" { + message = "IllegalState" + } + + panic(message) + } +} + +func (a *ATNDeserializer) readInt() int { + v := a.data[a.pos] + + a.pos++ + + return int(v) +} + +func (a *ATNDeserializer) readInt32() int { + var low = a.readInt() + var high = a.readInt() + return low | 
(high << 16) +} + +//TODO +//func (a *ATNDeserializer) readLong() int64 { +// panic("Not implemented") +// var low = a.readInt32() +// var high = a.readInt32() +// return (low & 0x00000000FFFFFFFF) | (high << int32) +//} + +func createByteToHex() []string { + bth := make([]string, 256) + + for i := 0; i < 256; i++ { + bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)})) + } + + return bth +} + +var byteToHex = createByteToHex() + +func (a *ATNDeserializer) readUUID() string { + bb := make([]int, 16) + + for i := 7; i >= 0; i-- { + integer := a.readInt() + + bb[(2*i)+1] = integer & 0xFF + bb[2*i] = (integer >> 8) & 0xFF + } + + return byteToHex[bb[0]] + byteToHex[bb[1]] + + byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + + byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + + byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + + byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + + byteToHex[bb[10]] + byteToHex[bb[11]] + + byteToHex[bb[12]] + byteToHex[bb[13]] + + byteToHex[bb[14]] + byteToHex[bb[15]] +} + +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { + target := atn.states[trg] + + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } + + return NewRangeTransition(target, arg1, arg2) + + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } + + return NewAtomTransition(target, arg1) + + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + + case TransitionNOTSET: + return NewNotSetTransition(target, sets[arg1]) + + case TransitionWILDCARD: + return NewWildcardTransition(target) + } + + panic("The specified transition type is not valid.") +} + +func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { + var s ATNState + + switch typeIndex { + case ATNStateInvalidType: + return nil + + case ATNStateBasic: + s = NewBasicState() + + case ATNStateRuleStart: + s = NewRuleStartState() + + case ATNStateBlockStart: + s = NewBasicBlockStartState() + + case ATNStatePlusBlockStart: + s = NewPlusBlockStartState() + + case ATNStateStarBlockStart: + s = NewStarBlockStartState() + + case ATNStateTokenStart: + s = NewTokensStartState() + + case ATNStateRuleStop: + s = NewRuleStopState() + + case ATNStateBlockEnd: + s = NewBlockEndState() + + case ATNStateStarLoopBack: + s = NewStarLoopbackState() + + case ATNStateStarLoopEntry: + s = NewStarLoopEntryState() + + case ATNStatePlusLoopBack: + s = NewPlusLoopbackState() + + case ATNStateLoopEnd: + s = NewLoopEndState() + + default: + panic(fmt.Sprintf("state type %d is invalid", typeIndex)) + } + + s.SetRuleIndex(ruleIndex) + + return s +} + +func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { + switch typeIndex { + case LexerActionTypeChannel: + return NewLexerChannelAction(data1) + + case LexerActionTypeCustom: + return NewLexerCustomAction(data1, data2) + + case LexerActionTypeMode: + return NewLexerModeAction(data1) + + case LexerActionTypeMore: + return LexerMoreActionINSTANCE + + case LexerActionTypePopMode: + 
return LexerPopModeActionINSTANCE + + case LexerActionTypePushMode: + return NewLexerPushModeAction(data1) + + case LexerActionTypeSkip: + return LexerSkipActionINSTANCE + + case LexerActionTypeType: + return NewLexerTypeAction(data1) + + default: + panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go new file mode 100644 index 00000000000..d5454d6d5dd --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go @@ -0,0 +1,50 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { + b := new(BaseATNSimulator) + + b.atn = atn + b.sharedContextCache = sharedContextCache + + return b +} + +func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { + if b.sharedContextCache == nil { + return context + } + + visited := make(map[PredictionContext]PredictionContext) + + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go new file mode 100644 index 00000000000..563d5db38d4 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +// Constants for serialization. +const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + hash() int +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. 
+ atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. + transitions []Transition +} + +func NewBaseATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) equals(other interface{}) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + as.epsilonOnlyTransitions = false + } + + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } +} + +type BasicState struct { + *BaseATNState +} + +func NewBasicState() *BasicState { + b := NewBaseATNState() + + b.stateType = ATNStateBasic + + return &BasicState{BaseATNState: b} +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + *BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. 
+type BaseBlockStartState struct { + *BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + *BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateBlockStart + + return &BasicBlockStartState{BaseBlockStartState: b} +} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + *BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + b := NewBaseATNState() + + b.stateType = ATNStateBlockEnd + + return &BlockEndState{BaseATNState: b} +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. +type RuleStopState struct { + *BaseATNState +} + +func NewRuleStopState() *RuleStopState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStop + + return &RuleStopState{BaseATNState: b} +} + +type RuleStartState struct { + *BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStart + + return &RuleStartState{BaseATNState: b} +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + *BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + b := NewBaseDecisionState() + + b.stateType = ATNStatePlusLoopBack + + return &PlusLoopbackState{BaseDecisionState: b} +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + *BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStatePlusBlockStart + + return &PlusBlockStartState{BaseBlockStartState: b} +} + +// StarBlockStartState is the block that begins a closure loop. +type StarBlockStartState struct { + *BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateStarBlockStart + + return &StarBlockStartState{BaseBlockStartState: b} +} + +type StarLoopbackState struct { + *BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + b := NewBaseATNState() + + b.stateType = ATNStateStarLoopBack + + return &StarLoopbackState{BaseATNState: b} +} + +type StarLoopEntryState struct { + *BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + b := NewBaseDecisionState() + + b.stateType = ATNStateStarLoopEntry + + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{BaseDecisionState: b} +} + +// LoopEndState marks the end of a * or + loop. 
+type LoopEndState struct { + *BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + b := NewBaseATNState() + + b.stateType = ATNStateLoopEnd + + return &LoopEndState{BaseATNState: b} +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. +type TokensStartState struct { + *BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + b := NewBaseDecisionState() + + b.stateType = ATNStateTokenStart + + return &TokensStartState{BaseDecisionState: b} +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go new file mode 100644 index 00000000000..a7b48976b31 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go @@ -0,0 +1,11 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represent the type of recognizer an ATN applies to. +const ( + ATNTypeLexer = 0 + ATNTypeParser = 1 +) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go new file mode 100644 index 00000000000..70c1207f7ff --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type CharStream interface { + IntStream + GetText(int, int) string + GetTextFromTokens(start, end Token) string + GetTextFromInterval(*Interval) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go new file mode 100644 index 00000000000..330ff8f31f8 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go @@ -0,0 +1,56 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// TokenFactory creates CommonToken objects. +type TokenFactory interface { + Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token +} + +// CommonTokenFactory is the default TokenFactory implementation. +type CommonTokenFactory struct { + // copyText indicates whether CommonToken.setText should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings of + // text from the input after the lexer creates a token (e.g. the + // implementation of CharStream.GetText in UnbufferedCharStream panics an + // UnsupportedOperationException). Explicitly setting the token text allows + // Token.GetText to be called at any time regardless of the input stream + // implementation. + // + // The default value is false to avoid the performance and memory overhead of + // copying text for every token unless explicitly requested. + copyText bool +} + +func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { + return &CommonTokenFactory{copyText: copyText} +} + +// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. 
It does not +// explicitly copy token text when constructing tokens. +var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) + +func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { + t := NewCommonToken(source, ttype, channel, start, stop) + + t.line = line + t.column = column + + if text != "" { + t.SetText(text) + } else if c.copyText && source.charStream != nil { + t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) + } + + return t +} + +func (c *CommonTokenFactory) createThin(ttype int, text string) Token { + t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) + t.SetText(text) + + return t +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go new file mode 100644 index 00000000000..c90e9b8904c --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go @@ -0,0 +1,447 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// CommonTokenStream is an implementation of TokenStream that loads tokens from +// a TokenSource on-demand and places the tokens in a buffer to provide access +// to any previous token by index. This token stream ignores the value of +// Token.getChannel. If your parser requires the token stream filter tokens to +// only those on a particular channel, such as Token.DEFAULT_CHANNEL or +// Token.HIDDEN_CHANNEL, use a filtering token stream such a CommonTokenStream. +type CommonTokenStream struct { + channel int + + // fetchedEOF indicates whether the Token.EOF token has been fetched from + // tokenSource and added to tokens. This field improves performance for the + // following cases: + // + // consume: The lookahead check in consume to preven consuming the EOF symbol is + // optimized by checking the values of fetchedEOF and p instead of calling LA. + // + // fetch: The check to prevent adding multiple EOF symbols into tokens is + // trivial with bt field. + fetchedEOF bool + + // index indexs into tokens of the current token (next token to consume). + // tokens[p] should be LT(1). It is set to -1 when the stream is first + // constructed or when SetTokenSource is called, indicating that the first token + // has not yet been fetched from the token source. For additional information, + // see the documentation of IntStream for a description of initializing methods. + index int + + // tokenSource is the TokenSource from which tokens for the bt stream are + // fetched. + tokenSource TokenSource + + // tokens is all tokens fetched from the token source. The list is considered a + // complete view of the input once fetchedEOF is set to true. 
+ tokens []Token +} + +func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { + return &CommonTokenStream{ + channel: channel, + index: -1, + tokenSource: lexer, + tokens: make([]Token, 0), + } +} + +func (c *CommonTokenStream) GetAllTokens() []Token { + return c.tokens +} + +func (c *CommonTokenStream) Mark() int { + return 0 +} + +func (c *CommonTokenStream) Release(marker int) {} + +func (c *CommonTokenStream) reset() { + c.Seek(0) +} + +func (c *CommonTokenStream) Seek(index int) { + c.lazyInit() + c.index = c.adjustSeekIndex(index) +} + +func (c *CommonTokenStream) Get(index int) Token { + c.lazyInit() + + return c.tokens[index] +} + +func (c *CommonTokenStream) Consume() { + SkipEOFCheck := false + + if c.index >= 0 { + if c.fetchedEOF { + // The last token in tokens is EOF. Skip the check if p indexes any fetched. + // token except the last. + SkipEOFCheck = c.index < len(c.tokens)-1 + } else { + // No EOF token in tokens. Skip the check if p indexes a fetched token. + SkipEOFCheck = c.index < len(c.tokens) + } + } else { + // Not yet initialized + SkipEOFCheck = false + } + + if !SkipEOFCheck && c.LA(1) == TokenEOF { + panic("cannot consume EOF") + } + + if c.Sync(c.index + 1) { + c.index = c.adjustSeekIndex(c.index + 1) + } +} + +// Sync makes sure index i in tokens has a token and returns true if a token is +// located at index i and otherwise false. +func (c *CommonTokenStream) Sync(i int) bool { + n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + + if n > 0 { + fetched := c.fetch(n) + return fetched >= n + } + + return true +} + +// fetch adds n elements to buffer and returns the actual number of elements +// added to the buffer. +func (c *CommonTokenStream) fetch(n int) int { + if c.fetchedEOF { + return 0 + } + + for i := 0; i < n; i++ { + t := c.tokenSource.NextToken() + + t.SetTokenIndex(len(c.tokens)) + c.tokens = append(c.tokens, t) + + if t.GetTokenType() == TokenEOF { + c.fetchedEOF = true + + return i + 1 + } + } + + return n +} + +// GetTokens gets all tokens from start to stop inclusive. +func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { + if start < 0 || stop < 0 { + return nil + } + + c.lazyInit() + + subset := make([]Token, 0) + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + for i := start; i < stop; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + if types == nil || types.contains(t.GetTokenType()) { + subset = append(subset, t) + } + } + + return subset +} + +func (c *CommonTokenStream) LA(i int) int { + return c.LT(i).GetTokenType() +} + +func (c *CommonTokenStream) lazyInit() { + if c.index == -1 { + c.setup() + } +} + +func (c *CommonTokenStream) setup() { + c.Sync(0) + c.index = c.adjustSeekIndex(0) +} + +func (c *CommonTokenStream) GetTokenSource() TokenSource { + return c.tokenSource +} + +// SetTokenSource resets the c token stream by setting its token source. +func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { + c.tokenSource = tokenSource + c.tokens = make([]Token, 0) + c.index = -1 +} + +// NextTokenOnChannel returns the index of the next token on channel given a +// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are +// no tokens on channel between i and EOF. 
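+//
+// Editorial sketch (not part of the upstream source): a typical call skips
+// hidden-channel tokens and lands on the next default-channel token:
+//
+//	idx := stream.NextTokenOnChannel(i+1, TokenDefaultChannel)
+//	// idx == -1 means only hidden tokens remain before EOF.
+//
+// Note that the loop below compares against c.channel, the stream's own
+// channel, not the channel argument.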
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { + c.Sync(i) + + if i >= len(c.tokens) { + return -1 + } + + token := c.tokens[i] + + for token.GetChannel() != c.channel { + if token.GetTokenType() == TokenEOF { + return -1 + } + + i++ + c.Sync(i) + token = c.tokens[i] + } + + return i +} + +// previousTokenOnChannel returns the index of the previous token on channel +// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if +// there are no tokens on channel between i and 0. +func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { + for i >= 0 && c.tokens[i].GetChannel() != channel { + i-- + } + + return i +} + +// GetHiddenTokensToRight collects all tokens on a specified channel to the +// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL +// or EOF. If channel is -1, it finds any non-default channel token. +func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) + } + + nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) + from := tokenIndex + 1 + + // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + var to int + + if nextOnChannel == -1 { + to = len(c.tokens) - 1 + } else { + to = nextOnChannel + } + + return c.filterForChannel(from, to, channel) +} + +// GetHiddenTokensToLeft collects all tokens on channel to the left of the +// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is +// -1, it finds any non default channel token. +func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) + } + + prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + + if prevOnChannel == tokenIndex-1 { + return nil + } + + // If there are none on channel to the left and prevOnChannel == -1 then from = 0 + from := prevOnChannel + 1 + to := tokenIndex - 1 + + return c.filterForChannel(from, to, channel) +} + +func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { + hidden := make([]Token, 0) + + for i := left; i < right+1; i++ { + t := c.tokens[i] + + if channel == -1 { + if t.GetChannel() != LexerDefaultTokenChannel { + hidden = append(hidden, t) + } + } else if t.GetChannel() == channel { + hidden = append(hidden, t) + } + } + + if len(hidden) == 0 { + return nil + } + + return hidden +} + +func (c *CommonTokenStream) GetSourceName() string { + return c.tokenSource.GetSourceName() +} + +func (c *CommonTokenStream) Size() int { + return len(c.tokens) +} + +func (c *CommonTokenStream) Index() int { + return c.index +} + +func (c *CommonTokenStream) GetAllText() string { + return c.GetTextFromInterval(nil) +} + +func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { + if start == nil || end == nil { + return "" + } + + return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) +} + +func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { + return c.GetTextFromInterval(interval.GetSourceInterval()) +} + +func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { + c.lazyInit() + c.Fill() + + if interval == nil { + interval = NewInterval(0, len(c.tokens)-1) + } + + start := interval.Start + stop := interval.Stop + + if start < 0 || stop < 0 { + return "" + } + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + s := "" + + for i := start; i < stop+1; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + s += t.GetText() + } + + return s +} + +// Fill gets all tokens from the lexer until EOF. +func (c *CommonTokenStream) Fill() { + c.lazyInit() + + for c.fetch(1000) == 1000 { + continue + } +} + +func (c *CommonTokenStream) adjustSeekIndex(i int) int { + return c.NextTokenOnChannel(i, c.channel) +} + +func (c *CommonTokenStream) LB(k int) Token { + if k == 0 || c.index-k < 0 { + return nil + } + + i := c.index + n := 1 + + // Find k good tokens looking backward + for n <= k { + // Skip off-channel tokens + i = c.previousTokenOnChannel(i-1, c.channel) + n++ + } + + if i < 0 { + return nil + } + + return c.tokens[i] +} + +func (c *CommonTokenStream) LT(k int) Token { + c.lazyInit() + + if k == 0 { + return nil + } + + if k < 0 { + return c.LB(-k) + } + + i := c.index + n := 1 // We know tokens[n] is valid + + // Find k good tokens + for n < k { + // Skip off-channel tokens, but make sure to not look past EOF + if c.Sync(i + 1) { + i = c.NextTokenOnChannel(i+1, c.channel) + } + + n++ + } + + return c.tokens[i] +} + +// getNumberOfOnChannelTokens counts EOF once. 
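+// Editorial note (not part of the upstream source): because the method calls
+// Fill first, it reads the entire token source before counting.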
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { + var n int + + c.Fill() + + for i := 0; i < len(c.tokens); i++ { + t := c.tokens[i] + + if t.GetChannel() == c.channel { + n++ + } + + if t.GetTokenType() == TokenEOF { + break + } + } + + return n +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go new file mode 100644 index 00000000000..d6079aa203b --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go @@ -0,0 +1,171 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "sort" + "sync" +) + +type DFA struct { + // atnStartState is the ATN state in which this was created + atnStartState DecisionState + + decision int + + // states is all the DFA states. Use Map to get the old state back; Set can only + // indicate whether it is there. + states map[int]*DFAState + statesMu sync.RWMutex + + s0 *DFAState + s0Mu sync.RWMutex + + // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. + // True if the DFA is for a precedence decision and false otherwise. + precedenceDfa bool +} + +func NewDFA(atnStartState DecisionState, decision int) *DFA { + return &DFA{ + atnStartState: atnStartState, + decision: decision, + states: make(map[int]*DFAState), + } +} + +// getPrecedenceStartState gets the start state for the current precedence and +// returns the start state corresponding to the specified precedence if a start +// state exists for the specified precedence and nil otherwise. d must be a +// precedence DFA. See also isPrecedenceDfa. +func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { + if !d.precedenceDfa { + panic("only precedence DFAs may contain a precedence start state") + } + + d.s0Mu.RLock() + defer d.s0Mu.RUnlock() + + // s0.edges is never nil for a precedence DFA + if precedence < 0 || precedence >= len(d.s0.edges) { + return nil + } + + return d.s0.edges[precedence] +} + +// setPrecedenceStartState sets the start state for the current precedence. d +// must be a precedence DFA. See also isPrecedenceDfa. +func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !d.precedenceDfa { + panic("only precedence DFAs may contain a precedence start state") + } + + if precedence < 0 { + return + } + + d.s0Mu.Lock() + defer d.s0Mu.Unlock() + + // Synchronization on s0 here is ok. When the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again. s0.edges + // is never nil for a precedence DFA. + if precedence >= len(d.s0.edges) { + d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...) + } + + d.s0.edges[precedence] = startState +} + +// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs +// from the current DFA configuration, then d.states is cleared, the initial +// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to +// store the start states for individual precedence values if precedenceDfa is +// true or nil otherwise, and d.precedenceDfa is updated. 
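+//
+// Editorial note (not part of the upstream source): for a precedence DFA,
+// s0 acts as a dispatch node whose edges are indexed by precedence level
+// rather than by input symbol, which is why s0.edges is grown on demand in
+// setPrecedenceStartState above.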
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { + if d.precedenceDfa != precedenceDfa { + d.states = make(map[int]*DFAState) + + if precedenceDfa { + precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) + + precedenceState.edges = make([]*DFAState, 0) + precedenceState.isAcceptState = false + precedenceState.requiresFullContext = false + d.s0 = precedenceState + } else { + d.s0 = nil + } + + d.precedenceDfa = precedenceDfa + } +} + +func (d *DFA) getS0() *DFAState { + d.s0Mu.RLock() + defer d.s0Mu.RUnlock() + return d.s0 +} + +func (d *DFA) setS0(s *DFAState) { + d.s0Mu.Lock() + defer d.s0Mu.Unlock() + d.s0 = s +} + +func (d *DFA) getState(hash int) (*DFAState, bool) { + d.statesMu.RLock() + defer d.statesMu.RUnlock() + s, ok := d.states[hash] + return s, ok +} + +func (d *DFA) setState(hash int, state *DFAState) { + d.statesMu.Lock() + defer d.statesMu.Unlock() + d.states[hash] = state +} + +func (d *DFA) numStates() int { + d.statesMu.RLock() + defer d.statesMu.RUnlock() + return len(d.states) +} + +type dfaStateList []*DFAState + +func (d dfaStateList) Len() int { return len(d) } +func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber } +func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } + +// sortedStates returns the states in d sorted by their state number. +func (d *DFA) sortedStates() []*DFAState { + vs := make([]*DFAState, 0, len(d.states)) + + for _, v := range d.states { + vs = append(vs, v) + } + + sort.Sort(dfaStateList(vs)) + + return vs +} + +func (d *DFA) String(literalNames []string, symbolicNames []string) string { + if d.s0 == nil { + return "" + } + + return NewDFASerializer(d, literalNames, symbolicNames).String() +} + +func (d *DFA) ToLexerString() string { + if d.s0 == nil { + return "" + } + + return NewLexerDFASerializer(d).String() +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go new file mode 100644 index 00000000000..4c0f690229a --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// DFASerializer is a DFA walker that knows how to dump them to serialized +// strings. 
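+//
+// Editorial example (illustrative, not from the upstream file): with
+// literalNames[0] set to "'a'", an edge on that token from state 0 to an
+// accepting state 1 that predicts alternative 1 is rendered as:
+//
+//	s0-'a'->:s1=>1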
+type DFASerializer struct { + dfa *DFA + literalNames []string + symbolicNames []string +} + +func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { + if literalNames == nil { + literalNames = make([]string, 0) + } + + if symbolicNames == nil { + symbolicNames = make([]string, 0) + } + + return &DFASerializer{ + dfa: dfa, + literalNames: literalNames, + symbolicNames: symbolicNames, + } +} + +func (d *DFASerializer) String() string { + if d.dfa.s0 == nil { + return "" + } + + buf := "" + states := d.dfa.sortedStates() + + for _, s := range states { + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += d.GetStateString(s) + buf += "-" + buf += d.getEdgeLabel(j) + buf += "->" + buf += d.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} + +func (d *DFASerializer) getEdgeLabel(i int) string { + if i == 0 { + return "EOF" + } else if d.literalNames != nil && i-1 < len(d.literalNames) { + return d.literalNames[i-1] + } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { + return d.symbolicNames[i-1] + } + + return strconv.Itoa(i - 1) +} + +func (d *DFASerializer) GetStateString(s *DFAState) string { + var a, b string + + if s.isAcceptState { + a = ":" + } + + if s.requiresFullContext { + b = "^" + } + + baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b + + if s.isAcceptState { + if s.predicates != nil { + return baseStateStr + "=>" + fmt.Sprint(s.predicates) + } + + return baseStateStr + "=>" + fmt.Sprint(s.prediction) + } + + return baseStateStr +} + +type LexerDFASerializer struct { + *DFASerializer +} + +func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { + return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} +} + +func (l *LexerDFASerializer) getEdgeLabel(i int) string { + return "'" + string(i) + "'" +} + +func (l *LexerDFASerializer) String() string { + if l.dfa.s0 == nil { + return "" + } + + buf := "" + states := l.dfa.sortedStates() + + for i := 0; i < len(states); i++ { + s := states[i] + + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += l.GetStateString(s) + buf += "-" + buf += l.getEdgeLabel(j) + buf += "->" + buf += l.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go new file mode 100644 index 00000000000..38e918ad91e --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go @@ -0,0 +1,166 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// PredPrediction maps a predicate to a predicted alternative. +type PredPrediction struct { + alt int + pred SemanticContext +} + +func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { + return &PredPrediction{alt: alt, pred: pred} +} + +func (p *PredPrediction) String() string { + return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" +} + +// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// Ullman p. 
117 says: "The DFA uses its state to keep track of all possible +// states the ATN can be in after reading each input symbol. That is to say, +// after reading input a1a2..an, the DFA is in a state that represents the +// subset T of the states of the ATN that are reachable from the ATN's start +// state along some path labeled a1a2..an." In conventional NFA-to-DFA +// conversion, therefore, the subset T would be a bitset representing the set of +// states the ATN could be in. We need to track the alt predicted by each state +// as well, however. More importantly, we need to maintain a stack of states, +// tracking the closure operations as they jump from rule to rule, emulating +// rule invocations (method calls). I have to add a stack to simulate the proper +// lookahead sequences for the underlying LL grammar from which the ATN was +// derived. +// +// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a +// state (ala normal conversion) and a RuleContext describing the chain of rules +// (if any) followed to arrive at that state. +// +// A DFAState may have multiple references to a particular state, but with +// different ATN contexts (with same or different alts) meaning that state was +// reached via a different set of rule invocations. +type DFAState struct { + stateNumber int + configs ATNConfigSet + + // edges elements point to the target of the symbol. Shift up by 1 so (-1) + // Token.EOF maps to the first element. + edges []*DFAState + + isAcceptState bool + + // prediction is the ttype we match or alt we predict if the state is accept. + // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or + // requiresFullContext. + prediction int + + lexerActionExecutor *LexerActionExecutor + + // requiresFullContext indicates it was created during an SLL prediction that + // discovered a conflict between the configurations in the state. Future + // ParserATNSimulator.execATN invocations immediately jump doing + // full context prediction if true. + requiresFullContext bool + + // predicates is the predicates associated with the ATN configurations of the + // DFA state during SLL parsing. When we have predicates, requiresFullContext + // is false, since full context prediction evaluates predicates on-the-fly. If + // d is + // not nil, then prediction is ATN.INVALID_ALT_NUMBER. + // + // We only use these for non-requiresFullContext but conflicting states. That + // means we know from the context (it's $ or we don't dip into outer context) + // that it's an ambiguity not a conflict. + // + // This list is computed by + // ParserATNSimulator.predicateDFAState. + predicates []*PredPrediction +} + +func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { + if configs == nil { + configs = NewBaseATNConfigSet(false) + } + + return &DFAState{configs: configs, stateNumber: stateNumber} +} + +// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. +func (d *DFAState) GetAltSet() *Set { + alts := NewSet(nil, nil) + + if d.configs != nil { + for _, c := range d.configs.GetItems() { + alts.add(c.GetAlt()) + } + } + + if alts.length() == 0 { + return nil + } + + return alts +} + +func (d *DFAState) setPrediction(v int) { + d.prediction = v +} + +// equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. 
+// +// Because the number of alternatives and number of ATN configurations are +// finite, there is a finite number of DFA states that can be processed. This is +// necessary to show that the algorithm terminates. +// +// Cannot test the DFA state numbers here because in +// ParserATNSimulator.addDFAState we need to know if any other state exists that +// has d exact set of ATN configurations. The stateNumber is irrelevant. +func (d *DFAState) equals(other interface{}) bool { + if d == other { + return true + } else if _, ok := other.(*DFAState); !ok { + return false + } + + return d.configs.Equals(other.(*DFAState).configs) +} + +func (d *DFAState) String() string { + var s string + if d.isAcceptState { + if d.predicates != nil { + s = "=>" + fmt.Sprint(d.predicates) + } else { + s = "=>" + fmt.Sprint(d.prediction) + } + } + + return fmt.Sprintf("%d:%s%s", fmt.Sprint(d.configs), s) +} + +func (d *DFAState) hash() int { + h := murmurInit(11) + + c := 1 + if d.isAcceptState { + if d.predicates != nil { + for _, p := range d.predicates { + h = murmurUpdate(h, p.alt) + h = murmurUpdate(h, p.pred.hash()) + c += 2 + } + } else { + h = murmurUpdate(h, d.prediction) + c += 1 + } + } + + h = murmurUpdate(h, d.configs.hash()) + return murmurFinish(h, c) +} \ No newline at end of file diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go new file mode 100644 index 00000000000..1fec43d9dca --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go @@ -0,0 +1,111 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// +// This implementation of {@link ANTLRErrorListener} can be used to identify +// certain potential correctness and performance problems in grammars. "reports" +// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate +// message. +// +//
+//   - Ambiguities: These are cases where more than one path through the
+//     grammar can Match the input.
+//   - Weak context sensitivity: These are cases where full-context
+//     prediction resolved an SLL conflict to a unique alternative which
+//     equaled the minimum alternative of the SLL conflict.
+//   - Strong (forced) context sensitivity: These are cases where the
+//     full-context prediction resolved an SLL conflict to a unique
+//     alternative, and the minimum alternative of the SLL conflict was found
+//     to not be a truly viable alternative. Two-stage parsing cannot be used
+//     for inputs where this situation occurs.
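+//
+// Editorial usage sketch (the generated-parser constructor is hypothetical;
+// the sketch is not part of the upstream file):
+//
+//	p := NewMyGrammarParser(tokens)
+//	p.AddErrorListener(NewDiagnosticErrorListener(true)) // exact ambiguities only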
+ +type DiagnosticErrorListener struct { + *DefaultErrorListener + + exactOnly bool +} + +func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { + + n := new(DiagnosticErrorListener) + + // whether all ambiguities or only exact ambiguities are Reported. + n.exactOnly = exactOnly + return n +} + +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if d.exactOnly && !exact { + return + } + msg := "reportAmbiguity d=" + + d.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + d.getConflictingAlts(ambigAlts, configs).String() + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + + msg := "reportAttemptingFullContext d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + msg := "reportContextSensitivity d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { + decision := dfa.decision + ruleIndex := dfa.atnStartState.GetRuleIndex() + + ruleNames := recognizer.GetRuleNames() + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { + return strconv.Itoa(decision) + } + ruleName := ruleNames[ruleIndex] + if ruleName == "" { + return strconv.Itoa(decision) + } + return strconv.Itoa(decision) + " (" + ruleName + ")" +} + +// +// Computes the set of conflicting or ambiguous alternatives from a +// configuration set, if that information was not already provided by the +// parser. +// +// @param ReportedAlts The set of conflicting or ambiguous alternatives, as +// Reported by the parser. +// @param configs The conflicting or ambiguous configuration set. +// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise +// returns the set of alternatives represented in {@code configs}. +// +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { + if ReportedAlts != nil { + return ReportedAlts + } + result := NewBitSet() + for _, c := range set.GetItems() { + result.add(c.GetAlt()) + } + + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go new file mode 100644 index 00000000000..028e1a9d7f0 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go @@ -0,0 +1,108 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. 
The +// default implementation of each method does nothing, but can be overridden as +// necessary. + +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// +// Provides a default instance of {@link ConsoleErrorListener}. +// +var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// +// {@inheritDoc} +// +//

+// This implementation prints messages to {@link System//err} containing the
+// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
+// the following format:
+//
+//	line line:charPositionInLine msg
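+//
+// For example (editorial illustration), a typical error might print:
+//
+//	line 3:7 mismatched input ';' expecting ')'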
+// +func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go new file mode 100644 index 00000000000..977a6e45496 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go @@ -0,0 +1,758 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + inErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// This is the default implementation of {@link ANTLRErrorStrategy} used for +// error Reporting and recovery in ANTLR parsers. +// +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //inErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseum. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. 
+	//
+	d.lastErrorIndex = -1
+	d.lastErrorStates = nil
+	return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +// +// @param recognizer the parser instance +// +func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool { + return d.errorRecoveryMode +} + +// +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +// +// @param recognizer +// +func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// +// {@inheritDoc} +// +//

+// The default implementation simply calls {@link //endErrorCondition}.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+	d.endErrorCondition(recognizer)
+}
+
+// {@inheritDoc}
+//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table:
+//
+//   - {@link NoViableAltException}: dispatches the call to
+//     {@link //ReportNoViableAlternative}
+//   - {@link InputMisMatchException}: dispatches the call to
+//     {@link //ReportInputMisMatch}
+//   - {@link FailedPredicateException}: dispatches the call to
+//     {@link //ReportFailedPredicate}
+//   - All other types: calls {@link Parser//NotifyErrorListeners} to Report
+//     the exception
+// +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.inErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// {@inheritDoc} +// +//

+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+// +func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.getErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure +// that the current lookahead symbol is consistent with what were expecting +// at d point in the ATN. You can call d anytime but ANTLR only +// generates code to check before subrules/loops and each iteration. +// +//
+
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what we were expecting
+// at this point in the ATN. You can call this anytime but ANTLR only
+// generates code to check before subrules/loops and each iteration.
+//
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
+// +//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+// 
+// +// At the start of a sub rule upon error, {@link //Sync} performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +//
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
+// +//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+// +//
+// ORIGINS
+// +//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of all of the rules surrounding the loop. So, for rule
+// +//
+// classfunc : 'class' ID '{' member* '}'
+// 
+// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +//
+// This functionality costs a little bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as a blank { }.
+//
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+	// If already recovering, don't try to Sync
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+
+	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+	la := recognizer.GetTokenStream().LA(1)
+
+	// try cheaper subset first; might get lucky. seems to shave a wee bit off
+	nextTokens := recognizer.GetATN().NextTokens(s, nil)
+	if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+		return
+	}
+
+	switch s.GetStateType() {
+	case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+		// Report error and recover if possible
+		if d.SingleTokenDeletion(recognizer) != nil {
+			return
+		}
+		panic(NewInputMisMatchException(recognizer))
+	case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+		d.ReportUnwantedToken(recognizer)
+		expecting := NewIntervalSet()
+		expecting.addSet(recognizer.GetExpectedTokens())
+		whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
+		d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+	default:
+		// do nothing if we can't identify the exact kind of ATN state
+	}
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+	tokens := recognizer.GetTokenStream()
+	var input string
+	if tokens != nil {
+		if e.startToken.GetTokenType() == TokenEOF {
+			input = "<EOF>"
+		} else {
+			input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+		}
+	} else {
+		input = "<unknown input>"
+	}
+	msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+	msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
+		" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+	ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+	msg := "rule " + ruleName + " " + e.message
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
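The reporters above, plus ReportUnwantedToken and ReportMissingToken further down, each build their message with a fixed prefix ("no viable alternative at input", "mismatched input", "extraneous input", "missing"). If an application wants to bucket collected messages by recovery kind, plain prefix matching is enough; a self-contained sketch:

package main

import (
	"fmt"
	"strings"
)

// classify buckets a syntax-error message by the prefix the
// DefaultErrorStrategy reporters use when building it.
func classify(msg string) string {
	switch {
	case strings.HasPrefix(msg, "no viable alternative at input"):
		return "prediction error"
	case strings.HasPrefix(msg, "mismatched input"):
		return "mismatched input"
	case strings.HasPrefix(msg, "extraneous input"):
		return "extra token (single-token deletion)"
	case strings.HasPrefix(msg, "missing"):
		return "missing token (single-token insertion)"
	default:
		return "other (e.g. failed predicate)"
	}
}

func main() {
	fmt.Println(classify("mismatched input ';' expecting ')'")) // mismatched input
}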
+
+// This method is called to Report a syntax error which requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When this method returns,
+// {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenDeletion} identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+// +//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	tokenName := d.GetTokenErrorDisplay(t)
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "extraneous input " + tokenName + " expecting " +
+		expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenInsertion} identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+// +//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+// +// @param recognizer the parser instance +// +func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { + if d.inErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + + " at " + d.GetTokenErrorDisplay(t) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +//
+// The default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with an
+// {@link InputMisMatchException}.
+// +//
+// EXTRA TOKEN (single token deletion)
+// +//
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the {@code LA(2)} token) as the successful result of the Match operation.
+// +//
+// This recovery strategy is implemented by {@link //singleTokenDeletion}.
+// +//
+// MISSING TOKEN (single token insertion)
+// +//
+// If the current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
+// +//
+// This recovery strategy is implemented by {@link //singleTokenInsertion}.
+// +//
+// EXAMPLE
+// +//
+// For example, input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// the call chain:
+// +//
+// stat → expr → atom
+// 
+//
+// and it will be trying to Match the {@code ')'} at this point in the
+// derivation:
+//
+//
+// => ID '=' '(' INT ')' ('+' atom)* <EOF>
+// ^
+// 
+//
+// The attempt to Match {@code ')'} will fail when it sees {@code <EOF>} and
+// calls {@link //recoverInline}. To recover, it sees that {@code LA(1)==<EOF>}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+//
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+	// SINGLE TOKEN DELETION
+	MatchedSymbol := d.SingleTokenDeletion(recognizer)
+	if MatchedSymbol != nil {
+		// we have deleted the extra token.
+		// now, move past ttype token as if all were ok
+		recognizer.Consume()
+		return MatchedSymbol
+	}
+	// SINGLE TOKEN INSERTION
+	if d.SingleTokenInsertion(recognizer) {
+		return d.GetMissingSymbol(recognizer)
+	}
+	// even that didn't work; we must panic the exception
+	panic(NewInputMisMatchException(recognizer))
+}
+
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If this method returns
+// {@code true}, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+// +// @param recognizer the parser instance +// @return {@code true} if single-token insertion is a viable recovery +// strategy for the current mismatched input, otherwise {@code false} +// +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// This method implements the single-token deletion inline error recovery +// strategy. It is called by {@link //recoverInline} to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// {@code recognizer} will not be in error recovery mode since the +// returned token was a successful Match. +// +//
+// If the single-token deletion is successful, this method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning, {@link //ReportMatch} is called to signal a successful
+// Match.
+//
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
+//
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+	NextTokenType := recognizer.GetTokenStream().LA(2)
+	expecting := d.GetExpectedTokens(recognizer)
+	if expecting.contains(NextTokenType) {
+		d.ReportUnwantedToken(recognizer)
+		// print("recoverFromMisMatchedToken deleting " \
+		// + str(recognizer.GetTokenStream().LT(1)) \
+		// + " since " + str(recognizer.GetTokenStream().LT(2)) \
+		// + " is what we want", file=sys.stderr)
+		recognizer.Consume() // simply delete extra token
+		// we want to return the token we're actually Matching
+		MatchedSymbol := recognizer.GetCurrentToken()
+		d.ReportMatch(recognizer) // we know current token is correct
+		return MatchedSymbol
+	}
+
+	return nil
+}
+
+// Conjure up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example, x=ID {f($x)}. The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+//
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+	currentSymbol := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	expectedTokenType := expecting.first()
+	var tokenText string
+
+	if expectedTokenType == TokenEOF {
+		tokenText = "<missing EOF>"
+	} else {
+		ln := recognizer.GetLiteralNames()
+		if expectedTokenType > 0 && expectedTokenType < len(ln) {
+			tokenText = "<missing " + ln[expectedTokenType] + ">"
+		} else {
+			tokenText = "<missing undefined>" // TODO matches the JS impl
+		}
+	}
+	current := currentSymbol
+	lookback := recognizer.GetTokenStream().LT(-1)
+	if current.GetTokenType() == TokenEOF && lookback != nil {
+		current = lookback
+	}
+
+	tf := recognizer.GetTokenFactory()
+
+	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+	return recognizer.GetExpectedTokens()
+}
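GetExpectedTokens and StringVerbose are also useful outside the strategy, e.g. for suggestion-style diagnostics. A sketch of a listener that prints the expected set at each error; it assumes the recognizer handed to SyntaxError is the generated parser, so the assertion to antlr.Parser succeeds:

package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// expectationListener prints the token set the parser expected at the
// point of each syntax error, in the same verbose form the default
// strategy uses when it builds "expecting ..." messages.
type expectationListener struct {
	*antlr.DefaultErrorListener
}

func (l *expectationListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
	line, column int, msg string, e antlr.RecognitionException) {
	if p, ok := recognizer.(antlr.Parser); ok {
		set := p.GetExpectedTokens()
		fmt.Printf("%d:%d expected one of %s\n", line, column,
			set.StringVerbose(p.GetLiteralNames(), p.GetSymbolicNames(), false))
	}
}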
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new Java type.
+//
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+		}
+	}
+	return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+	s = strings.Replace(s, "\t", "\\t", -1)
+	s = strings.Replace(s, "\n", "\\n", -1)
+	s = strings.Replace(s, "\r", "\\r", -1)
+	return "'" + s + "'"
+}
+
+// Compute the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+// This local follow set only includes tokens
+// from within the rule, i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// EXAMPLE
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider grammar:
+//
+//	a : '[' b ']'
+//	  | '(' b ')'
+//
+//	b : c '^' INT
+//	c : ID
+//	  | INT
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+//	FOLLOW(b1_in_a) = FIRST(']') = ']'
+//	FOLLOW(b2_in_a) = FIRST(')') = ')'
+//	FOLLOW(c_in_b)  = FIRST('^') = '^'
+//
+// Upon erroneous input "[]", the call chain is
+//
+//	a -> b -> c
+//
+// and, hence, the follow context stack is:
+//
+//	depth  follow set  start of rule execution
+//	0      <EOF>       a (from main())
+//	1      ']'         b
+//	2      '^'         c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets--the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics with an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// "Algorithms + Data Structures = Programs" by Niklaus Wirth +// +// and +// +// "A note on error recovery in recursive descent parsers": +// http://portal.acm.org/citation.cfm?id=947902.947905 +// +// Later, Josef Grosch had some good ideas: +// +// "Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers": +// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined +// at run-time upon error to avoid overhead during parsing. +// +func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// +// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +// by immediately canceling the parse operation with a +// {@link ParseCancellationException}. The implementation ensures that the +// {@link ParserRuleContext//exception} field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +//
+// This error strategy is useful in the following scenarios.
+// +//
+//
+//   • Two-stage parsing: This error strategy allows the first
+//     stage of two-stage parsing to immediately terminate if an error is
+//     encountered, and immediately fall back to the second stage. In addition to
+//     avoiding wasted work by attempting to recover from errors here, the empty
+//     implementation of {@link BailErrorStrategy//Sync} improves the performance of
+//     the first stage (see the sketch following this comment).
+//   • Silent validation: When syntax errors are not being
+//     Reported or logged, and the parse result is simply ignored if errors occur,
+//     the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+//     when the result will be ignored either way.
+// +//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
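Expanded into runnable form, the two-stage pattern described above might look like the following sketch. The buildParser and parse callbacks are hypothetical stand-ins for constructing a generated parser over a fresh token stream and invoking its start rule; the bail strategy, the prediction-mode constants, and SetErrorHandler are runtime APIs:

package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// parseTwoStage tries the cheap first stage (SLL prediction, bail on
// the first error) and re-parses with the full, tolerant configuration
// only when stage one panics with the strategy's cancellation.
func parseTwoStage(buildParser func() antlr.Parser, parse func(antlr.Parser)) {
	first := buildParser()
	first.SetErrorHandler(antlr.NewBailErrorStrategy())
	first.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)

	defer func() {
		if recover() != nil {
			// Stage one bailed; re-parse with full LL prediction and the
			// default, recovering strategy.
			second := buildParser()
			second.SetErrorHandler(antlr.NewDefaultErrorStrategy())
			second.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
			parse(second)
		}
	}()
	parse(first)
}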
+// +// @see Parser//setErrorHandler(ANTLRErrorStrategy) + +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Instead of recovering from exception {@code e}, re-panic it wrapped +// in a {@link ParseCancellationException} so it is not caught by the +// rule func catches. Use {@link Exception//getCause()} to get the +// original {@link RecognitionException}. +// +func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + context = context.GetParent().(ParserRuleContext) + } + panic(NewParseCancellationException()) // TODO we don't emit e properly +} + +// Make sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +// +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Make sure we don't attempt to recover from problems in subrules.// +func (b *BailErrorStrategy) Sync(recognizer Parser) { + // pass +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go new file mode 100644 index 00000000000..2ef74926ecb --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go @@ -0,0 +1,241 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + // The current {@link Token} when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + t.offendingToken = nil + // Get the ATN state number the parser was in at the time the error + // occurred. For {@link NoViableAltException} and + // {@link LexerNoViableAltException} exceptions, this is the + // {@link DecisionState} number. For others, it is the state whose outgoing + // edge we couldn't Match. 
+	t.offendingState = -1
+	if t.recognizer != nil {
+		t.offendingState = t.recognizer.GetState()
+	}
+
+	return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+	return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+	return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+	return b.input
+}
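These accessors are what an error listener would use to inspect the exception it is handed. One caveat grounded in the strategy code earlier in this patch: ReportUnwantedToken and ReportMissingToken pass a nil exception, so e must be checked. A sketch:

package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// exceptionListener shows the RecognitionException accessors in use.
// e is nil for errors the strategy recovers from inline.
type exceptionListener struct {
	*antlr.DefaultErrorListener
}

func (l *exceptionListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
	line, column int, msg string, e antlr.RecognitionException) {
	if e == nil {
		fmt.Printf("%d:%d recovered inline: %s\n", line, column, msg)
		return
	}
	if tok := e.GetOffendingToken(); tok != nil {
		fmt.Printf("%d:%d at %q: %s\n", line, column, tok.GetText(), msg)
	}
}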
+
+// If the state number is not known, this method returns -1.
+
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
+// +// @return The set of token types that could potentially follow the current +// state in the ATN, or {@code nil} if the information is not available. +// / +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs ATNConfigSet +} + +// Indicates that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. Reported by ReportNoViableAlternative() +// +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1)?// + n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// This signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. +// +func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// A semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. 
+ +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go new file mode 100644 index 00000000000..842170c086c --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go @@ -0,0 +1,49 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "io" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. + +type FileStream struct { + *InputStream + + filename string +} + +func NewFileStream(fileName string) (*FileStream, error) { + + buf := bytes.NewBuffer(nil) + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer f.Close() + _, err = io.Copy(buf, f) + if err != nil { + return nil, err + } + + fs := new(FileStream) + + fs.filename = fileName + s := string(buf.Bytes()) + + fs.InputStream = NewInputStream(s) + + return fs, nil + +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go new file mode 100644 index 00000000000..5ff270f5368 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go @@ -0,0 +1,113 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +type InputStream struct { + name string + index int + data []rune + size int +} + +func NewInputStream(data string) *InputStream { + + is := new(InputStream) + + is.name = "" + is.index = 0 + is.data = []rune(data) + is.size = len(is.data) // number of runes + + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +func (is *InputStream) Index() int { + return is.index +} + +func (is *InputStream) Size() int { + return is.size +} + +// mark/release do nothing we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +func (is *InputStream) Release(marker int) { +} + +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) + return + } + // seek forward + is.index = intMin(index, is.size) +} + +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i *Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "Obtained from string" +} + +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go new file mode 100644 index 00000000000..438e0ea6e75 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type IntStream interface { + Consume() + LA(int) int + Mark() int + Release(marker int) + Index() int + Seek(index int) + Size() int + GetSourceName() string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go new file mode 100644 index 00000000000..510d9091141 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go @@ -0,0 +1,296 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type Interval struct { + Start int + Stop int +} + +/* stop is not included! 
*/ +func NewInterval(start, stop int) *Interval { + i := new(Interval) + + i.Start = start + i.Stop = stop + return i +} + +func (i *Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop +} + +func (i *Interval) String() string { + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) + } + + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) +} + +func (i *Interval) length() int { + return i.Stop - i.Start +} + +type IntervalSet struct { + intervals []*Interval + readOnly bool +} + +func NewIntervalSet() *IntervalSet { + + i := new(IntervalSet) + + i.intervals = nil + i.readOnly = false + + return i +} + +func (i *IntervalSet) first() int { + if len(i.intervals) == 0 { + return TokenInvalidType + } + + return i.intervals[0].Start +} + +func (i *IntervalSet) addOne(v int) { + i.addInterval(NewInterval(v, v+1)) +} + +func (i *IntervalSet) addRange(l, h int) { + i.addInterval(NewInterval(l, h+1)) +} + +func (i *IntervalSet) addInterval(v *Interval) { + if i.intervals == nil { + i.intervals = make([]*Interval, 0) + i.intervals = append(i.intervals, v) + } else { + // find insert pos + for k, interval := range i.intervals { + // distinct range -> insert + if v.Stop < interval.Start { + i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + return + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start + return + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) + + // if not applying to end, merge potential overlaps + if k < len(i.intervals)-1 { + l := i.intervals[k] + r := i.intervals[k+1] + // if r contained in l + if l.Stop >= r.Stop { + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } + } + return + } + } + // greater than any exiting + i.intervals = append(i.intervals, v) + } +} + +func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { + if other.intervals != nil { + for k := 0; k < len(other.intervals); k++ { + i2 := other.intervals[k] + i.addInterval(NewInterval(i2.Start, i2.Stop)) + } + } + return i +} + +func (i *IntervalSet) complement(start int, stop int) *IntervalSet { + result := NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for j := 0; j < len(i.intervals); j++ { + result.removeRange(i.intervals[j]) + } + return result +} + +func (i *IntervalSet) contains(item int) bool { + if i.intervals == nil { + return false + } + for k := 0; k < len(i.intervals); k++ { + if i.intervals[k].Contains(item) { + return true + } + } + return false +} + +func (i *IntervalSet) length() int { + len := 0 + + for _, v := range i.intervals { + len += v.length() + } + + return len +} + +func (i *IntervalSet) removeRange(v *Interval) { + if v.Start == v.Stop-1 { + i.removeOne(v.Start) + } else if i.intervals != nil { + k := 0 + for n := 0; n < len(i.intervals); n++ { + ni := i.intervals[k] + // intervals are ordered + if v.Stop <= ni.Start { + return + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
+				return
+			} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+				// i.intervals.splice(k, 1)
+				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+				k = k - 1 // need another pass
+			} else if v.Start < ni.Stop {
+				i.intervals[k] = NewInterval(ni.Start, v.Start)
+			} else if v.Stop < ni.Stop {
+				i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+			}
+			k++
+		}
+	}
+}
+
+func (i *IntervalSet) removeOne(v int) {
+	if i.intervals != nil {
+		for k := 0; k < len(i.intervals); k++ {
+			ki := i.intervals[k]
+			// intervals are ordered
+			if v < ki.Start {
+				return
+			} else if v == ki.Start && v == ki.Stop-1 {
+				// i.intervals.splice(k, 1)
+				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+				return
+			} else if v == ki.Start {
+				i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+				return
+			} else if v == ki.Stop-1 {
+				i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+				return
+			} else if v < ki.Stop-1 {
+				x := NewInterval(ki.Start, v)
+				ki.Start = v + 1
+				// i.intervals.splice(k, 0, x)
+				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+				return
+			}
+		}
+	}
+}
+
+func (i *IntervalSet) String() string {
+	return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+	if i.intervals == nil {
+		return "{}"
+	} else if literalNames != nil || symbolicNames != nil {
+		return i.toTokenString(literalNames, symbolicNames)
+	} else if elemsAreChar {
+		return i.toCharString()
+	}
+
+	return i.toIndexString()
+}
+
+func (i *IntervalSet) toCharString() string {
+	names := make([]string, len(i.intervals))
+
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				names = append(names, ("'" + string(v.Start) + "'"))
+			}
+		} else {
+			names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'")
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+	names := make([]string, 0)
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				names = append(names, strconv.Itoa(v.Start))
+			}
+		} else {
+			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+	names := make([]string, 0)
+	for _, v := range i.intervals {
+		for j := v.Start; j < v.Stop; j++ {
+			names = append(names, i.elementName(literalNames, symbolicNames, j))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+	if a == TokenEOF {
+		return "<EOF>"
+	} else if a == TokenEpsilon {
+		return "<EPSILON>"
+	} else {
+		if a < len(literalNames) && literalNames[a] != "" {
+			return literalNames[a]
+		}
+
+		return symbolicNames[a]
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
new file mode 100644
index 00000000000..02deaf99cdf
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
@@ -0,0 +1,417 @@
+// Copyright (c) 2012-2017 The ANTLR Project.
All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A lexer is recognizer that draws input symbols from a character stream. +// lexer grammars result in a subclass of this object. A Lexer object +// uses simplified Match() and error recovery mechanisms in the interest +// of speed. +/// + +type Lexer interface { + TokenSource + Recognizer + + Emit() Token + + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) +} + +type BaseLexer struct { + *BaseRecognizer + + Interpreter ILexerATNSimulator + TokenStartCharIndex int + TokenStartLine int + TokenStartColumn int + ActionType int + Virt Lexer // The most derived lexer implementation. Allows virtual method calls. + + input CharStream + factory TokenFactory + tokenFactorySourcePair *TokenSourceCharStreamPair + token Token + hitEOF bool + channel int + thetype int + modeStack IntStack + mode int + text string +} + +func NewBaseLexer(input CharStream) *BaseLexer { + + lexer := new(BaseLexer) + + lexer.BaseRecognizer = NewBaseRecognizer() + + lexer.input = input + lexer.factory = CommonTokenFactoryDEFAULT + lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} + + lexer.Virt = lexer + + lexer.Interpreter = nil // child classes must populate it + + // The goal of all lexer rules/methods is to create a token object. + // l is an instance variable as multiple rules may collaborate to + // create a single token. NextToken will return l object after + // Matching lexer rule(s). If you subclass to allow multiple token + // emissions, then set l to the last token to be Matched or + // something nonnil so that the auto token emit mechanism will not + // emit another token. + lexer.token = nil + + // What character index in the stream did the current token start at? + // Needed, for example, to get the text for current token. Set at + // the start of NextToken. + lexer.TokenStartCharIndex = -1 + + // The line on which the first character of the token resides/// + lexer.TokenStartLine = -1 + + // The character position of first character within the line/// + lexer.TokenStartColumn = -1 + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF then you see DONE EOF. + lexer.hitEOF = false + + // The channel number for the current token/// + lexer.channel = TokenDefaultChannel + + // The token type for the current token/// + lexer.thetype = TokenInvalidType + + lexer.modeStack = make([]int, 0) + lexer.mode = LexerDefaultMode + + // You can set the text for the current token to override what is in + // the input char buffer. Use setText() or can set l instance var. 
+ // / + lexer.text = "" + + return lexer +} + +const ( + LexerDefaultMode = 0 + LexerMore = -2 + LexerSkip = -3 +) + +const ( + LexerDefaultTokenChannel = TokenDefaultChannel + LexerHidden = TokenHiddenChannel + LexerMinCharValue = 0x0000 + LexerMaxCharValue = 0x10FFFF +) + +func (b *BaseLexer) reset() { + // wack Lexer state variables + if b.input != nil { + b.input.Seek(0) // rewind the input + } + b.token = nil + b.thetype = TokenInvalidType + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = -1 + b.TokenStartColumn = -1 + b.TokenStartLine = -1 + b.text = "" + + b.hitEOF = false + b.mode = LexerDefaultMode + b.modeStack = make([]int, 0) + + b.Interpreter.reset() +} + +func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { + return b.Interpreter +} + +func (b *BaseLexer) GetInputStream() CharStream { + return b.input +} + +func (b *BaseLexer) GetSourceName() string { + return b.GrammarFileName +} + +func (b *BaseLexer) SetChannel(v int) { + b.channel = v +} + +func (b *BaseLexer) GetTokenFactory() TokenFactory { + return b.factory +} + +func (b *BaseLexer) setTokenFactory(f TokenFactory) { + b.factory = f +} + +func (b *BaseLexer) safeMatch() (ret int) { + defer func() { + if e := recover(); e != nil { + if re, ok := e.(RecognitionException); ok { + b.notifyListeners(re) // Report error + b.Recover(re) + ret = LexerSkip // default + } + } + }() + + return b.Interpreter.Match(b.input, b.mode) +} + +// Return a token from l source i.e., Match a token on the char stream. +func (b *BaseLexer) NextToken() Token { + if b.input == nil { + panic("NextToken requires a non-nil input stream.") + } + + tokenStartMarker := b.input.Mark() + + // previously in finally block + defer func() { + // make sure we release marker after Match or + // unbuffered char stream will keep buffering + b.input.Release(tokenStartMarker) + }() + + for { + if b.hitEOF { + b.EmitEOF() + return b.token + } + b.token = nil + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = b.input.Index() + b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() + b.TokenStartLine = b.Interpreter.GetLine() + b.text = "" + continueOuter := false + for { + b.thetype = TokenInvalidType + ttype := LexerSkip + + ttype = b.safeMatch() + + if b.input.LA(1) == TokenEOF { + b.hitEOF = true + } + if b.thetype == TokenInvalidType { + b.thetype = ttype + } + if b.thetype == LexerSkip { + continueOuter = true + break + } + if b.thetype != LexerMore { + break + } + } + + if continueOuter { + continue + } + if b.token == nil { + b.Virt.Emit() + } + return b.token + } + + return nil +} + +// Instruct the lexer to Skip creating a token for current lexer rule +// and look for another token. NextToken() knows to keep looking when +// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// if token==nil at end of any token rule, it creates one for you +// and emits it. 
+// / +func (b *BaseLexer) Skip() { + b.thetype = LexerSkip +} + +func (b *BaseLexer) More() { + b.thetype = LexerMore +} + +func (b *BaseLexer) SetMode(m int) { + b.mode = m +} + +func (b *BaseLexer) PushMode(m int) { + if LexerATNSimulatorDebug { + fmt.Println("pushMode " + strconv.Itoa(m)) + } + b.modeStack.Push(b.mode) + b.mode = m +} + +func (b *BaseLexer) PopMode() int { + if len(b.modeStack) == 0 { + panic("Empty Stack") + } + if LexerATNSimulatorDebug { + fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) + } + i, _ := b.modeStack.Pop() + b.mode = i + return b.mode +} + +func (b *BaseLexer) inputStream() CharStream { + return b.input +} + +func (b *BaseLexer) setInputStream(input CharStream) { + b.input = nil + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} + b.reset() + b.input = input + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} +} + +func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { + return b.tokenFactorySourcePair +} + +// By default does not support multiple emits per NextToken invocation +// for efficiency reasons. Subclass and override l method, NextToken, +// and GetToken (to push tokens into a list and pull from that list +// rather than a single variable as l implementation does). +// / +func (b *BaseLexer) EmitToken(token Token) { + b.token = token +} + +// The standard method called to automatically emit a token at the +// outermost lexical rule. The token object should point into the +// char buffer start..stop. If there is a text override in 'text', +// use that to set the token's text. Override l method to emit +// custom Token objects or provide a Newfactory. +// / +func (b *BaseLexer) Emit() Token { + t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) + b.EmitToken(t) + return t +} + +func (b *BaseLexer) EmitEOF() Token { + cpos := b.GetCharPositionInLine() + lpos := b.GetLine() + eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) + b.EmitToken(eof) + return eof +} + +func (b *BaseLexer) GetCharPositionInLine() int { + return b.Interpreter.GetCharPositionInLine() +} + +func (b *BaseLexer) GetLine() int { + return b.Interpreter.GetLine() +} + +func (b *BaseLexer) GetType() int { + return b.thetype +} + +func (b *BaseLexer) SetType(t int) { + b.thetype = t +} + +// What is the index of the current character of lookahead?/// +func (b *BaseLexer) GetCharIndex() int { + return b.input.Index() +} + +// Return the text Matched so far for the current token or any text override. +//Set the complete text of l token it wipes any previous changes to the text. +func (b *BaseLexer) GetText() string { + if b.text != "" { + return b.text + } + + return b.Interpreter.GetText(b.input) +} + +func (b *BaseLexer) SetText(text string) { + b.text = text +} + +func (b *BaseLexer) GetATN() *ATN { + return b.Interpreter.ATN() +} + +// Return a list of all Token objects in input char stream. +// Forces load of all tokens. Does not include EOF token. 
+// /
+func (b *BaseLexer) GetAllTokens() []Token {
+	vl := b.Virt
+	tokens := make([]Token, 0)
+	t := vl.NextToken()
+	for t.GetTokenType() != TokenEOF {
+		tokens = append(tokens, t)
+		t = vl.NextToken()
+	}
+	return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+	start := b.TokenStartCharIndex
+	stop := b.input.Index()
+	text := b.input.GetTextFromInterval(NewInterval(start, stop))
+	msg := "token recognition error at: '" + text + "'"
+	listener := b.GetErrorListenerDispatch()
+	listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+	if c == TokenEOF {
+		return "<EOF>"
+	} else if c == '\n' {
+		return "\\n"
+	} else if c == '\t' {
+		return "\\t"
+	} else if c == '\r' {
+		return "\\r"
+	} else {
+		return string(c)
+	}
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+	return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Lexers can normally Match any char in its vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+// /
+func (b *BaseLexer) Recover(re RecognitionException) {
+	if b.input.LA(1) != TokenEOF {
+		if _, ok := re.(*LexerNoViableAltException); ok {
+			// Skip a char and try again
+			b.Interpreter.Consume(b.input)
+		} else {
+			// TODO: Do we lose character or line position information?
+			b.input.Consume()
+		}
+	}
+}
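GetAllTokens gives a quick way to dump a token stream during development. A sketch — the generated constructor name in the trailing comment is hypothetical, the rest is runtime API:

package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// dumpTokens prints every token the lexer produces, relying on the
// GetAllTokens helper defined above (it is a method on BaseLexer, not
// part of the Lexer interface, hence the anonymous interface here).
func dumpTokens(lex interface{ GetAllTokens() []antlr.Token }) {
	for _, t := range lex.GetAllTokens() {
		fmt.Printf("%d:%d type=%d %q\n",
			t.GetLine(), t.GetColumn(), t.GetTokenType(), t.GetText())
	}
}

// Usage with a hypothetical generated lexer:
//
//	dumpTokens(parsing.NewMyLexer(antlr.NewInputStream("a + b")))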
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
new file mode 100644
index 00000000000..20df84f9437
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
@@ -0,0 +1,431 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+	LexerActionTypeChannel  = 0 // The type of a {@link LexerChannelAction} action.
+	LexerActionTypeCustom   = 1 // The type of a {@link LexerCustomAction} action.
+	LexerActionTypeMode     = 2 // The type of a {@link LexerModeAction} action.
+	LexerActionTypeMore     = 3 // The type of a {@link LexerMoreAction} action.
+	LexerActionTypePopMode  = 4 // The type of a {@link LexerPopModeAction} action.
+	LexerActionTypePushMode = 5 // The type of a {@link LexerPushModeAction} action.
+	LexerActionTypeSkip     = 6 // The type of a {@link LexerSkipAction} action.
+	LexerActionTypeType     = 7 // The type of a {@link LexerTypeAction} action.
+)
+
+type LexerAction interface {
+	getActionType() int
+	getIsPositionDependent() bool
+	execute(lexer Lexer)
+	hash() int
+	equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+	actionType          int
+	isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+	la := new(BaseLexerAction)
+
+	la.actionType = action
+	la.isPositionDependent = false
+
+	return la
+}
+
+func (b *BaseLexerAction) execute(lexer Lexer) {
+	panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+	return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+	return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) hash() int {
+	return b.actionType
+}
+
+func (b *BaseLexerAction) equals(other LexerAction) bool {
+	return b == other
+}
+
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+//
+// The {@code Skip} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerSkipAction struct { + *BaseLexerAction +} + +func NewLexerSkipAction() *LexerSkipAction { + la := new(LexerSkipAction) + la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) + return la +} + +// Provides a singleton instance of l parameterless lexer action. +var LexerSkipActionINSTANCE = NewLexerSkipAction() + +func (l *LexerSkipAction) execute(lexer Lexer) { + lexer.Skip() +} + +func (l *LexerSkipAction) String() string { + return "skip" +} + +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// with the assigned type. +type LexerTypeAction struct { + *BaseLexerAction + + thetype int +} + +func NewLexerTypeAction(thetype int) *LexerTypeAction { + l := new(LexerTypeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) + l.thetype = thetype + return l +} + +func (l *LexerTypeAction) execute(lexer Lexer) { + lexer.SetType(l.thetype) +} + +func (l *LexerTypeAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.thetype) + return murmurFinish(h, 2) +} + +func (l *LexerTypeAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return l.thetype == other.(*LexerTypeAction).thetype + } +} + +func (l *LexerTypeAction) String() string { + return "actionType(" + strconv.Itoa(l.thetype) + ")" +} + +// Implements the {@code pushMode} lexer action by calling +// {@link Lexer//pushMode} with the assigned mode. +type LexerPushModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerPushModeAction(mode int) *LexerPushModeAction { + + l := new(LexerPushModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) + + l.mode = mode + return l +} + +//
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) { + lexer.PushMode(l.mode) +} + +func (l *LexerPushModeAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerPushModeAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerPushModeAction).mode + } +} + +func (l *LexerPushModeAction) String() string { + return "pushMode(" + strconv.Itoa(l.mode) + ")" +} + +// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// +//

+// The {@code popMode} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct { + *BaseLexerAction +} + +func NewLexerPopModeAction() *LexerPopModeAction { + + l := new(LexerPopModeAction) + + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) + + return l +} + +var LexerPopModeActionINSTANCE = NewLexerPopModeAction() + +//

+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) { + lexer.PopMode() +} + +func (l *LexerPopModeAction) String() string { + return "popMode" +} + +// Implements the {@code more} lexer action by calling {@link Lexer//more}. +// +//

+// The {@code more} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+ +type LexerMoreAction struct { + *BaseLexerAction +} + +func NewLexerMoreAction() *LexerMoreAction { + l := new(LexerMoreAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) + + return l +} + +var LexerMoreActionINSTANCE = NewLexerMoreAction() + +//

+// This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) { + lexer.More() +} + +func (l *LexerMoreAction) String() string { + return "more" +} + +// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// the assigned mode. +type LexerModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerModeAction(mode int) *LexerModeAction { + l := new(LexerModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) + l.mode = mode + return l +} + +//

+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
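+//
+// For example (a sketch; the mode numbers are assumed values), the lexer
+// commands {@code -> pushMode(1)}, {@code -> popMode} and {@code -> mode(0)}
+// correspond to:
+//
+//   push := NewLexerPushModeAction(1) // execute calls lexer.PushMode(1)
+//   pop := LexerPopModeActionINSTANCE // execute calls lexer.PopMode()
+//   set := NewLexerModeAction(0)      // execute calls lexer.SetMode(0)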
+func (l *LexerModeAction) execute(lexer Lexer) { + lexer.SetMode(l.mode) +} + +func (l *LexerModeAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerModeAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerModeAction).mode + } +} + +func (l *LexerModeAction) String() string { + return "mode(" + strconv.Itoa(l.mode) + ")" +} + +// Executes a custom lexer action by calling {@link Recognizer//action} with the +// rule and action indexes assigned to the custom action. The implementation of +// a custom action is added to the generated code for the lexer in an override +// of {@link Recognizer//action} when the grammar is compiled. +// +//

+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
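+//
+// For example (a sketch; the rule and action indexes are assumed values),
+// equality is defined by the rule and action indexes rather than by
+// identity:
+//
+//   a := NewLexerCustomAction(2, 0)
+//   b := NewLexerCustomAction(2, 0)
+//   same := a.equals(b) && a.hash() == b.hash() // true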
+ +// Constructs a custom lexer action with the specified rule and action +// indexes. +// +// @param ruleIndex The rule index to use for calls to +// {@link Recognizer//action}. +// @param actionIndex The action index to use for calls to +// {@link Recognizer//action}. + +type LexerCustomAction struct { + *BaseLexerAction + ruleIndex, actionIndex int +} + +func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { + l := new(LexerCustomAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) + l.ruleIndex = ruleIndex + l.actionIndex = actionIndex + l.isPositionDependent = true + return l +} + +//

+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) { + lexer.Action(nil, l.ruleIndex, l.actionIndex) +} + +func (l *LexerCustomAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.ruleIndex) + h = murmurUpdate(h, l.actionIndex) + return murmurFinish(h, 3) +} + +func (l *LexerCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex + } +} + +// Implements the {@code channel} lexer action by calling +// {@link Lexer//setChannel} with the assigned channel. +// Constructs a New{@code channel} action with the specified channel value. +// @param channel The channel value to pass to {@link Lexer//setChannel}. +type LexerChannelAction struct { + *BaseLexerAction + + channel int +} + +func NewLexerChannelAction(channel int) *LexerChannelAction { + l := new(LexerChannelAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) + l.channel = channel + return l +} + +//

+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) { + lexer.SetChannel(l.channel) +} + +func (l *LexerChannelAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.channel) + return murmurFinish(h, 2) +} + +func (l *LexerChannelAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return l.channel == other.(*LexerChannelAction).channel + } +} + +func (l *LexerChannelAction) String() string { + return "channel(" + strconv.Itoa(l.channel) + ")" +} + +// This implementation of {@link LexerAction} is used for tracking input offsets +// for position-dependent actions within a {@link LexerActionExecutor}. +// +//

+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
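+//
+// For example (a sketch; the offset and indexes are assumed values), a
+// position-dependent custom action can be pinned to a character offset
+// relative to the token start:
+//
+//   custom := NewLexerCustomAction(1, 0)
+//   indexed := NewLexerIndexedCustomAction(3, custom)
+//   // the executor seeks to tokenStart+3 before running custom
+//   // (see LexerActionExecutor//execute)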
+
+// Constructs a new indexed custom action by associating a character offset
+// with a {@link LexerAction}.
+//
+//

+// Note: This class is only required for lexer actions for which
+// {@link LexerAction//isPositionDependent} returns {@code true}.
+// +// @param offset The offset into the input {@link CharStream}, relative to +// the token start index, at which the specified lexer action should be +// executed. +// @param action The lexer action to execute at a particular offset in the +// input {@link CharStream}. +type LexerIndexedCustomAction struct { + *BaseLexerAction + + offset int + lexerAction LexerAction + isPositionDependent bool +} + +func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { + + l := new(LexerIndexedCustomAction) + l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) + + l.offset = offset + l.lexerAction = lexerAction + l.isPositionDependent = true + + return l +} + +//

+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) { + // assume the input stream position was properly set by the calling code + l.lexerAction.execute(lexer) +} + +func (l *LexerIndexedCustomAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.offset) + h = murmurUpdate(h, l.lexerAction.hash()) + return murmurFinish(h, 3) +} + +func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go new file mode 100644 index 00000000000..80b949a1a54 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go @@ -0,0 +1,170 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//

+// The executor tracks position information for position-dependent lexer
+// actions efficiently, ensuring that actions appearing only at the end of the
+// rule do not cause bloating of the {@link DFA} created for the lexer.
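+//
+// For example (a sketch; the channel and type values are assumed), an
+// executor that first sets the channel and then the token type could be
+// composed as:
+//
+//   var exec *LexerActionExecutor
+//   exec = LexerActionExecutorappend(exec, NewLexerChannelAction(1))
+//   exec = LexerActionExecutorappend(exec, NewLexerTypeAction(5))
+//   // exec.execute(lexer, input, startIndex) runs both actions in order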
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link LexerATNConfig//hashCode} operation. + l.cachedHash = murmurInit(57) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) + } + + return l +} + +// Creates a {@link LexerActionExecutor} which executes the actions for +// the input {@code lexerActionExecutor} followed by a specified +// {@code lexerAction}. +// +// @param lexerActionExecutor The executor for actions already traversed by +// the lexer while Matching a token within a particular +// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as +// though it were an empty executor. +// @param lexerAction The lexer action to execute after the actions +// specified in {@code lexerActionExecutor}. +// +// @return A {@link LexerActionExecutor} for executing the combine actions +// of {@code lexerActionExecutor} and {@code lexerAction}. +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// Creates a {@link LexerActionExecutor} which encodes the current offset +// for position-dependent lexer actions. +// +//

+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+// +//

+// Prior to traversing a Match transition in the ATN, the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the DFA representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
+// +//

+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
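+//
+// For example (a sketch; the offset is an assumed value), an executor whose
+// only position-dependent action is a plain {@link LexerCustomAction} has
+// that action wrapped:
+//
+//   exec := NewLexerActionExecutor([]LexerAction{NewLexerCustomAction(1, 0)})
+//   fixed := exec.fixOffsetBeforeMatch(2)
+//   // fixed wraps the custom action in a LexerIndexedCustomAction at offset 2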
+// +// @param offset The current offset to assign to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// @return A {@link LexerActionExecutor} which stores input stream offsets +// for all position-dependent lexer actions. +// / +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0) + + for _, a := range l.lexerActions { + updatedLexerActions = append(updatedLexerActions, a) + } + } + + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//

+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
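+//
+// For example (a sketch), the simulator's accept step invokes
+//
+//   lexerActionExecutor.execute(lexer, input, startIndex)
+//
+// with {@code input} positioned one character past the token end; the
+// deferred Seek restores that position if an indexed action moved it.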
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) hash() int { + if l == nil { + return 61 + } + return l.cachedHash +} + +func (l *LexerActionExecutor) equals(other interface{}) bool { + if l == other { + return true + } else if _, ok := other.(*LexerActionExecutor); !ok { + return false + } else { + return l.cachedHash == other.(*LexerActionExecutor).cachedHash && + &l.lexerActions == &other.(*LexerActionExecutor).lexerActions + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go new file mode 100644 index 00000000000..131364f75cc --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go @@ -0,0 +1,658 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +var ( + LexerATNSimulatorDebug = false + LexerATNSimulatorDFADebug = false + + LexerATNSimulatorMinDFAEdge = 0 + LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN + + LexerATNSimulatorMatchCalls = 0 +) + +type ILexerATNSimulator interface { + IATNSimulator + + reset() + Match(input CharStream, mode int) int + GetCharPositionInLine() int + GetLine() int + GetText(input CharStream) string + Consume(input CharStream) +} + +type LexerATNSimulator struct { + *BaseATNSimulator + + recog Lexer + predictionMode int + mergeCache DoubleDict + startIndex int + Line int + CharPositionInLine int + mode int + prevAccept *SimState + MatchCalls int +} + +func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { + l := new(LexerATNSimulator) + + l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + l.decisionToDFA = decisionToDFA + l.recog = recog + // The current token's starting index into the character stream. + // Shared across DFA to ATN simulation in case the ATN fails and the + // DFA did not have a previous accept state. In l case, we use the + // ATN-generated exception object. 
+ l.startIndex = -1 + // line number 1..n within the input/// + l.Line = 1 + // The index of the character relative to the beginning of the line + // 0..n-1/// + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration + // info + l.prevAccept = NewSimState() + // done + return l +} + +func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { + l.CharPositionInLine = simulator.CharPositionInLine + l.Line = simulator.Line + l.mode = simulator.mode + l.startIndex = simulator.startIndex +} + +func (l *LexerATNSimulator) Match(input CharStream, mode int) int { + l.MatchCalls++ + l.mode = mode + mark := input.Mark() + + defer func() { + input.Release(mark) + }() + + l.startIndex = input.Index() + l.prevAccept.reset() + + dfa := l.decisionToDFA[mode] + + if dfa.s0 == nil { + return l.MatchATN(input) + } + + return l.execATN(input, dfa.s0) +} + +func (l *LexerATNSimulator) reset() { + l.prevAccept.reset() + l.startIndex = -1 + l.Line = 1 + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode +} + +func (l *LexerATNSimulator) MatchATN(input CharStream) int { + startState := l.atn.modeToStartState[l.mode] + + if LexerATNSimulatorDebug { + fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) + } + oldMode := l.mode + s0Closure := l.computeStartState(input, startState) + suppressEdge := s0Closure.hasSemanticContext + s0Closure.hasSemanticContext = false + + next := l.addDFAState(s0Closure) + + if !suppressEdge { + l.decisionToDFA[l.mode].setS0(next) + } + + predict := l.execATN(input, next) + + if LexerATNSimulatorDebug { + fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) + } + return predict +} + +func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { + + if LexerATNSimulatorDebug { + fmt.Println("start state closure=" + ds0.configs.String()) + } + if ds0.isAcceptState { + // allow zero-length tokens + l.captureSimState(l.prevAccept, input, ds0) + } + t := input.LA(1) + s := ds0 // s is current/from DFA state + + for { // while more work + if LexerATNSimulatorDebug { + fmt.Println("execATN loop starting closure: " + s.configs.String()) + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=nil, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=nil, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set there's no point in re-computing it. + // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. + // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. 
+ target := l.getExistingTargetState(s, t) + if target == nil { + target = l.computeTargetState(input, s, t) + // print("Computed:" + str(target)) + } + if target == ATNSimulatorError { + break + } + // If l is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if t != TokenEOF { + l.Consume(input) + } + if target.isAcceptState { + l.captureSimState(l.prevAccept, input, target) + if t == TokenEOF { + break + } + } + t = input.LA(1) + s = target // flip current DFA target becomes Newsrc/from state + } + + return l.failOrAccept(l.prevAccept, input, s.configs, t) +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// l method returns {@code nil}. +// +// @param s The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for l edge is not +// already cached +func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { + if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { + return nil + } + + target := s.edges[t-LexerATNSimulatorMinDFAEdge] + if LexerATNSimulatorDebug && target != nil { + fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) + } + return target +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param input The input stream +// @param s The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, l method +// returns {@link //ERROR}. +func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { + reach := NewOrderedATNConfigSet() + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { + // we got nowhere on t, don't panic out l knowledge it'd + // cause a failover from DFA later. + l.addDFAEdge(s, t, ATNSimulatorError, nil) + } + // stop when we can't Match any more char + return ATNSimulatorError + } + // Add an edge from s to target DFA found/created for reach + return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) +} + +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { + if l.prevAccept.dfaState != nil { + lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor + l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + } + + // if no accept and EOF is first char, return EOF + if t == TokenEOF && input.Index() == l.startIndex { + return TokenEOF + } + + panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) +} + +// Given a starting configuration set, figure out all ATN configurations +// we can reach upon input {@code t}. Parameter {@code reach} is a return +// parameter. 
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { + // l is used to Skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + SkipAlt := ATNInvalidAltNumber + + for _, cfg := range closure.GetItems() { + currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) + if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + continue + } + + if LexerATNSimulatorDebug { + + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + } + + for _, trans := range cfg.GetState().GetTransitions() { + target := l.getReachableTarget(trans, t) + if target != nil { + lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + if lexerActionExecutor != nil { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) + } + treatEOFAsEpsilon := (t == TokenEOF) + config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + if l.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEOFAsEpsilon) { + // any remaining configs for l alt have a lower priority + // than the one that just reached an accept state. + SkipAlt = cfg.GetAlt() + } + } + } + } +} + +func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { + if LexerATNSimulatorDebug { + fmt.Printf("ACTION %s\n", lexerActionExecutor) + } + // seek to after last char in token + input.Seek(index) + l.Line = line + l.CharPositionInLine = charPos + if lexerActionExecutor != nil && l.recog != nil { + lexerActionExecutor.execute(l.recog, input, startIndex) + } +} + +func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { + if trans.Matches(t, 0, LexerMaxCharValue) { + return trans.getTarget() + } + + return nil +} + +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { + configs := NewOrderedATNConfigSet() + for i := 0; i < len(p.GetTransitions()); i++ { + target := p.GetTransitions()[i].getTarget() + cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) + l.closure(input, cfg, configs, false, false, false) + } + + return configs +} + +// Since the alternatives within any lexer decision are ordered by +// preference, l method stops pursuing the closure as soon as an accept +// state is reached. After the first accept state is reached by depth-first +// search from {@code config}, all other (potentially reachable) states for +// l rule would have a lower priority. +// +// @return {@code true} if an accept state is reached, otherwise +// {@code false}. 
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { + + if LexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + } + + _, ok := config.state.(*RuleStopState) + if ok { + + if LexerATNSimulatorDebug { + if l.recog != nil { + fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) + } else { + fmt.Printf("closure at rule stop %s\n", config) + } + } + + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { + configs.Add(config, nil) + return true + } + + configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) + currentAltReachedAcceptState = true + } + if config.context != nil && !config.context.isEmpty() { + for i := 0; i < config.context.length(); i++ { + if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { + newContext := config.context.GetParent(i) // "pop" return state + returnState := l.atn.states[config.context.getReturnState(i)] + cfg := NewLexerATNConfig2(config, returnState, newContext) + currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + } + return currentAltReachedAcceptState + } + // optimization + if !config.state.GetEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { + configs.Add(config, nil) + } + } + for j := 0; j < len(config.state.GetTransitions()); j++ { + trans := config.state.GetTransitions()[j] + cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) + if cfg != nil { + currentAltReachedAcceptState = l.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + return currentAltReachedAcceptState +} + +// side-effect: can alter configs.hasSemanticContext +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, + configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { + + var cfg *LexerATNConfig + + if trans.getSerializationType() == TransitionRULE { + + rt := trans.(*RuleTransition) + newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) + + } else if trans.getSerializationType() == TransitionPRECEDENCE { + panic("Precedence predicates are not supported in lexers.") + } else if trans.getSerializationType() == TransitionPREDICATE { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for l "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // l predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? | ID {p2}? + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. 
The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since l is when we + // test them, we cannot cash the DFA state target of ID. + + pt := trans.(*PredicateTransition) + + if LexerATNSimulatorDebug { + fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) + } + configs.SetHasSemanticContext(true) + if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In l case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) + cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) + } else { + // ignore actions in referenced rules + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionEPSILON { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEOFAsEpsilon { + if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } + } + return cfg +} + +// Evaluate a predicate specified in the lexer. +// +//

+// If {@code speculative} is {@code true}, this method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//GetCharPositionInLine}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}).
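+//
+// For example (a sketch mirroring {@link //getEpsilonTarget}), a predicate
+// transition is evaluated speculatively during prediction:
+//
+//   if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+//       cfg = NewLexerATNConfig4(config, trans.getTarget())
+//   }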
+// +// @param input The input stream. +// @param ruleIndex The rule containing the predicate. +// @param predIndex The index of the predicate within the rule. +// @param speculative {@code true} if the current index in {@code input} is +// one character before the predicate's location. +// +// @return {@code true} if the specified predicate evaluates to +// {@code true}. +// / +func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { + // assume true if no recognizer was provided + if l.recog == nil { + return true + } + if !speculative { + return l.recog.Sempred(nil, ruleIndex, predIndex) + } + savedcolumn := l.CharPositionInLine + savedLine := l.Line + index := input.Index() + marker := input.Mark() + + defer func() { + l.CharPositionInLine = savedcolumn + l.Line = savedLine + input.Seek(index) + input.Release(marker) + }() + + l.Consume(input) + return l.recog.Sempred(nil, ruleIndex, predIndex) +} + +func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { + settings.index = input.Index() + settings.line = l.Line + settings.column = l.CharPositionInLine + settings.dfaState = dfaState +} + +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { + // leading to l call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes l edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to reSynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. + // / + suppressEdge := cfgs.HasSemanticContext() + cfgs.SetHasSemanticContext(false) + + to = l.addDFAState(cfgs) + + if suppressEdge { + return to + } + } + // add the edge + if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { + // Only track edges within the DFA bounds + return to + } + if LexerATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) + } + if from.edges == nil { + // make room for tokens 1..n and -1 masquerading as index 0 + from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1) + } + from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect + + return to +} + +// Add a NewDFA state if there isn't one with l set of +// configurations already. This method also detects the first +// configuration containing an ATN rule stop state. Later, when +// traversing the DFA, we will know which rule to accept. 
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState { + + proposed := NewDFAState(-1, configs) + var firstConfigWithRuleStopState ATNConfig + + for _, cfg := range configs.GetItems() { + + _, ok := cfg.GetState().(*RuleStopState) + + if ok { + firstConfigWithRuleStopState = cfg + break + } + } + if firstConfigWithRuleStopState != nil { + proposed.isAcceptState = true + proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) + } + hash := proposed.hash() + dfa := l.decisionToDFA[l.mode] + existing, ok := dfa.getState(hash) + if ok { + return existing + } + newState := proposed + newState.stateNumber = dfa.numStates() + configs.SetReadOnly(true) + newState.configs = configs + dfa.setState(hash, newState) + return newState +} + +func (l *LexerATNSimulator) getDFA(mode int) *DFA { + return l.decisionToDFA[mode] +} + +// Get the text Matched so far for the current token. +func (l *LexerATNSimulator) GetText(input CharStream) string { + // index is first lookahead char, don't include. + return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) +} + +func (l *LexerATNSimulator) Consume(input CharStream) { + curChar := input.LA(1) + if curChar == int('\n') { + l.Line++ + l.CharPositionInLine = 0 + } else { + l.CharPositionInLine++ + } + input.Consume() +} + +func (l *LexerATNSimulator) GetCharPositionInLine() int { + return l.CharPositionInLine +} + +func (l *LexerATNSimulator) GetLine() int { + return l.Line +} + +func (l *LexerATNSimulator) GetTokenName(tt int) string { + if tt == -1 { + return "EOF" + } + + return "'" + string(tt) + "'" +} + +func resetSimState(sim *SimState) { + sim.index = -1 + sim.line = 0 + sim.column = -1 + sim.dfaState = nil +} + +type SimState struct { + index int + line int + column int + dfaState *DFAState +} + +func NewSimState() *SimState { + s := new(SimState) + resetSimState(s) + return s +} + +func (s *SimState) reset() { + resetSimState(s) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go new file mode 100644 index 00000000000..f5afd09b393 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go @@ -0,0 +1,215 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +//* Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. +/// +const ( + LL1AnalyzerHitPred = TokenInvalidType +) + +//* +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. 
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() + lookBusy := NewSet(nil, nil) + seeThruPreds := false // fail to get lookahead upon pred + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) + // Wipe out lookahead for la alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+// +// @param s the ATN state +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx the complete parser context, or {@code nil} if the context +// should be ignored +// +// @return The set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +/// +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + seeThruPreds := true // ignore preds get all lookahead + var lookContext PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
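+//
+// For example (a sketch; callers normally reach this indirectly through
+// {@link ATN//NextTokens} or {@link Parser//GetExpectedTokens}):
+//
+//   la := NewLL1Analyzer(atn)
+//   follow := la.Look(s, nil, ctx) // tokens that can follow s in ctx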
+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + + removed := calledRuleStack.contains(returnState.GetRuleIndex()) + + defer func() { + if removed { + calledRuleStack.add(returnState.GetRuleIndex()) + } + }() + + calledRuleStack.remove(returnState.GetRuleIndex()) + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewBaseATNConfig6(s, 0, ctx) + + if lookBusy.contains(c) { + return + } + + lookBusy.add(c) + + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx != BasePredictionContextEMPTY { + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) 
+ } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go new file mode 100644 index 00000000000..fb60258e331 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go @@ -0,0 +1,718 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// p.is all the parsing support code essentially most of it is error +// recovery stuff.// +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + // The {@link ParserRuleContext} object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + // Specifies whether or not the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + // When {@link //setTrace}{@code (true)} is called, a reference to the + // {@link TraceListener} is stored here so it can be easily removed in a + // later call to {@link //setTrace}{@code (false)}. The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + // The list of {@link ParseTreeListener} listeners registered to receive + // events during the parse. + p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is + // incremented each time {@link //NotifyErrorListeners} is called. 
+ p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// p.field maps from the serialized ATN string to the deserialized {@link +// ATN} with +// bypass alternatives. +// +// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +// +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//

+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
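+//
+// For example (a sketch; {@code MyParserID} is an assumed generated
+// token-type constant), a generated rule Matches a token reference as:
+//
+//   tok := p.Match(MyParserID) // consumes on success, else inline recovery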
+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//

+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// Registers {@code listener} to receive events during the parsing process. +// +//

+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parse. In addition, calls to certain
+// rule entry methods may be omitted.
+// +//

+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+// +//
    +//
+// - Alterations to the grammar used to generate code may change the
+//   behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+//   generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+//   may change the behavior of the listener calls.
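+//
+// For example (a sketch; {@code MyTreeListener} is an assumed user-defined
+// {@link ParseTreeListener} implementation):
+//
+//   p.AddParseListener(&MyTreeListener{})
+//   // EnterEveryRule/ExitEveryRule now fire while the parse is running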
+// +// @param listener the listener to add +// +// @panics nilPointerException if {@code} listener is {@code nil} +// +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// +// Remove {@code listener} from the list of parse listeners. +// +//

+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.
+// @param listener the listener to remove +// +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// Notify any parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// +// Notify any parse listeners of an exit rule event. +// +// @see //addParseListener +// +func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// Tell our token source and error strategy about a Newway to create tokens.// +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// The ATN with bypass alternatives is expensive to create so we create it +// lazily. +// +// @panics UnsupportedOperationException if the current parser does not +// implement the {@link //getSerializedATN()} method. +// +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+// 
+ +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// Set the token stream and reset the parser.// +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// Match needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +// +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.inErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have Newlocalctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx +} + +// Get the precedence level for the top-most precedence rule. 
+// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// +// Checks whether or not {@code symbol} can follow the current state in the +// ATN. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. +// +//
+// return getExpectedTokens().contains(symbol)
+// 
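+//
+// For example, an error-recovery routine could ask whether a given token
+// type may legally follow the current state (MyParserSEMI is an
+// illustrative token type from a hypothetical generated parser):
+//
+// if parser.IsExpectedToken(MyParserSEMI) {
+//     // a ';' can appear here without a syntax error
+// }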
+// +// @param symbol the symbol type to check +// @return {@code true} if {@code symbol} can follow the current state in +// the ATN, otherwise {@code false}. + +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// Computes the set of input symbols which could follow the current parser +// state and context, as given by {@link //GetState} and {@link //GetContext}, +// respectively. +// +// @see ATN//getExpectedTokens(int, RuleContext) +// +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// Return List<String> of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +// +// this very useful for error messages. + +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// For debugging and other purposes.// +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// For debugging and other purposes.// +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.numStates() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// During a parse is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. p.is for quick and dirty debugging. 
+// +func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go new file mode 100644 index 00000000000..128b9a96d4b --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go @@ -0,0 +1,1473 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + ParserATNSimulatorDebug = false + ParserATNSimulatorListATNDecisions = false + ParserATNSimulatorDFADebug = false + ParserATNSimulatorRetryDebug = false +) + +type ParserATNSimulator struct { + *BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *DoubleDict + outerContext ParserRuleContext +} + +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := new(ParserATNSimulator) + + p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + // isn't Synchronized but we're ok since two threads shouldn't reuse same + // parser/atnsim object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid + // the merge if we ever see a and b again. Note that (b,a)&rarrc should + // also be examined during cache lookup. + // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // wack cache after each prediction + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + if dfa.precedenceDfa { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
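+ // Each precedence level keeps its own cached start state (see
+ // setPrecedenceStartState below), so the lookup must use the
+ // precedence currently on top of the parser's precedence stack.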
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0 + } + + if s0 == nil { + if outerContext == nil { + outerContext = RuleContextEmpty + } + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + // If p is not a precedence DFA, we check the ATN start state + // to determine if p ATN start state is the decision for the + // closure block that determines whether a precedence rule + // should continue or complete. + + t2 := dfa.atnStartState + t, ok := t2.(*StarLoopEntryState) + if !dfa.precedenceDfa && ok { + if t.precedenceRuleDecision { + dfa.setPrecedenceDfa(true) + } + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) + + if dfa.precedenceDfa { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.s0 = s0 + } + } + alt := p.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// Performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. + +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// if the set is empty, there is no viable alternative for current symbol +// does the state uniquely predict an alternative? +// does the state have a conflict that would prevent us from +// putting it on the work list? + +// We also have some key operations to do: +// add an edge from previous DFA state to potentially NewDFA state, D, +// upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) 
+// collecting predicates and adding semantic context to DFA accept states +// adding rule context to context-sensitive DFA accept states +// consuming an input symbol +// Reporting a conflict +// Reporting an ambiguity +// Reporting a context sensitivity +// Reporting insufficient predicates + +// cover these cases: +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds +// +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if ParserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.GetConflictingAlts() + if D.predicates != nil { + if ParserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL failover") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if ParserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue() + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if ParserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + if alts.length() == 0 { + panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) + } else if alts.length() == 1 { + return alts.minValue() + } else { + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts 
is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue() + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + + panic("Should not have reached p state") +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + edges := previousD.edges + if edges == nil { + return nil + } + + return edges[t+1] +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. + +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create Newtarget state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if ParserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.SetUniqueAlt(predictedAlt) + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.GetConflictingAlts().minValue()) + } + if D.isAcceptState && D.configs.HasSemanticContext() { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. 
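+ // The alternative count for this decision equals the number of
+ // outgoing transitions of its decision state; it bounds the altToPred
+ // table built by getPredsForAmbigAlts below.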
+ nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previous, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if ParserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.SetUniqueAlt(p.getUniqueAlt(reach)) + // unique prediction? + if reach.GetUniqueAlt() != ATNInvalidAltNumber { + predictedAlt = reach.GetUniqueAlt() + break + } else if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. 
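+ // Reporting a context sensitivity (rather than an ambiguity) tells
+ // error listeners that SLL and full LL disagreed here, but that full
+ // LL resolved the decision to a single alternative.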
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have nonconflicting alt subsets as in: + + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. + + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach) + + return predictedAlt +} + +func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { + if ParserATNSimulatorDebug { + fmt.Println("in computeReachSet, starting closure: " + closure.String()) + } + if p.mergeCache == nil { + p.mergeCache = NewDoubleDict() + } + intermediate := NewBaseATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var SkippedStopStates []*BaseATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.GetItems() { + if ParserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + _, ok := c.GetState().(*RuleStopState) + + if ok { + if fullCtx || t == TokenEOF { + if SkippedStopStates == nil { + SkippedStopStates = make([]*BaseATNConfig, 0) + } + SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig)) + if ParserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for j := 0; j < len(c.GetState().GetTransitions()); j++ { + trans := c.GetState().GetTransitions()[j] + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewBaseATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if ParserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + // Now figure out where the reach operation can take us... 
+ var reach ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if SkippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. + // + if reach == nil { + reach = NewBaseATNConfigSet(fullCtx) + closureBusy := NewSet(nil, nil) + treatEOFAsEpsilon := t == TokenEOF + for k := 0; k < len(intermediate.configs); k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet p condition whether or not it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(SkippedStopStates); l++ { + reach.Add(SkippedStopStates[l], p.mergeCache) + } + } + if len(reach.GetItems()) == 0 { + return nil + } + + return reach +} + +// +// Return a configuration set containing only the configurations from +// {@code configs} which are in a {@link RuleStopState}. If all +// configurations in {@code configs} are already in a rule stop state, p +// method simply returns {@code configs}. +// +//
+// When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+// +// @param configs the configuration set to update +// @param lookToEndOfRule when true, p method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// {@code configs}. +// +// @return {@code configs} if all configurations in {@code configs} are in a +// rule stop state, otherwise return a Newconfiguration set containing only +// the configurations from {@code configs} which are in a rule stop state +// +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewBaseATNConfigSet(configs.FullContext()) + for _, config := range configs.GetItems() { + + _, ok := config.GetState().(*RuleStopState) + + if ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewBaseATNConfigSet(fullCtx) + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewBaseATNConfig6(target, i+1, initialContext) + closureBusy := NewSet(nil, nil) + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// +// This method transforms the start state computed by +// {@link //computeStartState} to the special start state used by a +// precedence DFA for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +//
+// 1. Evaluate the precedence predicates for each configuration using
+//    {@link SemanticContext//evalPrecedence}.
+// 2. Remove all configurations which predict an alternative greater than
+//    1, for which another configuration that predicts alternative 1 is in
+//    the same ATN state with the same prediction context. This
+//    transformation is valid for the following reasons:
+//
+//    - The closure block cannot contain any epsilon transitions which
+//      bypass the body of the closure, so all states reachable via
+//      alternative 1 are part of the precedence alternatives of the
+//      transformed left-recursive rule.
+//    - The "primary" portion of a left recursive rule cannot contain an
+//      epsilon transition, so the only way an alternative other than 1 can
+//      exist in a state that is also reachable via alternative 1 is by
+//      nesting calls to the left-recursive rule, with the outer calls not
+//      being at the preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following.
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement})
+// from being eliminated by the filter.
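+//
+// Concretely, the implementation below makes two passes: the first pass
+// records, for every configuration predicting alternative 1, its prediction
+// context keyed by ATN state (statesFromAlt1); the second pass drops any
+// configuration for an alternative greater than 1 whose state and context
+// match a recorded alt-1 entry, unless precedence filtering is suppressed
+// for that configuration.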
+// +// @param configs The configuration set computed by +// {@link //computeStartState} as the start state for the DFA. +// @return The transformed configuration set representing the start state +// for a precedence DFA at a particular precedence level (determined by +// calling {@link Parser//getPrecedence}). +// +func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { + + statesFromAlt1 := make(map[int]PredictionContext) + configSet := NewBaseATNConfigSet(configs.FullContext()) + + for _, config := range configs.GetItems() { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.GetItems() { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). + if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.GetItems() { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i < nalts+1; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // nonambig alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if ParserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // unpredicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// +// This method is used to improve the localization of error messages by +// choosing an alternative rather than panicing a +// {@link NoViableAltException} in particular prediction scenarios where the +// {@link //ERROR} state was reached during ATN simulation. +// +//
+// The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+//    - If a syntactically valid path or paths reach the end of the decision
+//      rule and they are semantically valid if predicated, return the min
+//      associated alt.
+//    - Else, if a semantically invalid but syntactically valid path or
+//      paths exist, return the minimum associated alt.
+//    - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+// +// @param configs The ATN configurations which were valid immediately before +// the {@link //ERROR} state was reached +// @param outerContext The is the \gamma_0 initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// @return The value to return from {@link //AdaptivePredict}, or +// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not +// identified and {@link //AdaptivePredict} should Report an error instead. +// +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.GetItems()) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.GetItems() { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. + +type ATNConfigSetPair struct { + item0, item1 ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { + succeeded := NewBaseATNConfigSet(configs.FullContext()) + failed := NewBaseATNConfigSet(configs.FullContext()) + + for _, c := range configs.GetItems() { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []ATNConfigSet{succeeded, failed} +} + +// Look through a list of predicate/alt pairs, returning alts for the +// pairs that win. A {@code NONE} predicate indicates an alt containing an +// unpredicated config which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. 
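+//
+// For example, given pairs [(pred,1), (NONE,2)] where pred evaluates to
+// false, the returned BitSet contains only alt 2; and because a NONE pair
+// always "wins", a walk with complete==false stops as soon as it reaches
+// one.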
+// +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + + if ParserATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") + fmt.Println("configs(" + configs.String() + ")") + if config.GetReachesIntoOuterContext() > 50 { + panic("problem") + } + } + + _, ok := config.GetState().(*RuleStopState) + if ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges// +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if ci, ok := c.(*BaseATNConfig); ok && ci != nil { + if !t.getIsEpsilon() && closureBusy.add(c) != c { + // avoid infinite recursion for EOF* and EOF+ + continue + } + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if p is > 0. + + if closureBusy.add(c) != c { + // avoid infinite recursion for right-recursive rules + continue + } + + if p.dfa != nil && p.dfa.precedenceDfa { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + configs.SetDipsIntoOuterContext(true) // TODO: can remove? 
only care when we add to set per middle of p method + newDepth-- + if ParserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +func (p *ParserATNSimulator) getRuleName(index int) string { + if p.parser != nil && index >= 0 { + return p.parser.GetRuleNames()[index] + } + + return "" +} + +func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { + + switch t.getSerializationType() { + case TransitionRULE: + return p.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return p.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewBaseATNConfig4(config, t.getTarget()) + case TransitionATOM: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + case TransitionRANGE: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + case TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } +} + +func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewBaseATNConfig4(config, t.getTarget()) +} + +func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
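+ // The current input position is saved and restored around the
+ // evaluation because predicates are evaluated with the stream rewound
+ // to the decision's start index.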
+ currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewBaseATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) '' +// +// When the ATN simulation reaches the state before '', it has a DFA +// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally +// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node +// because alternative to has another way to continue, via [6|2|[]]. +// The key is that we have a single state that has config's only associated +// with a single alternative, 2, and crucially the state transitions +// among the configurations are all non-epsilon transitions. That means +// we don't consider any conflicts that include alternative 2. 
So, we +// ignore the conflict between alts 1 and 2. We ignore a set of +// conflicting alts when there is an intersection with an alternative +// associated with a single alt state in the state&rarrconfig-list map. +// +// It's also the case that we might have two conflicting configurations but +// also a 3rd nonconflicting configuration for a different alternative: +// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: +// +// a : A | A | A B +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue and so we do not +// stop working on p state. In the previous example, we're concerned +// with states associated with the conflicting alternatives. Here alt +// 3 is not associated with the conflicting configs, but since we can continue +// looking for input reasonably, I don't declare the state done. We +// ignore a set of conflicting alts when we have an alternative +// that we still need to pursue. +// + +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { + var conflictingAlts *BitSet + if configs.GetUniqueAlt() != ATNInvalidAltNumber { + conflictingAlts = NewBitSet() + conflictingAlts.add(configs.GetUniqueAlt()) + } else { + conflictingAlts = configs.GetConflictingAlts() + } + return conflictingAlts +} + +func (p *ParserATNSimulator) GetTokenName(t int) string { + if t == TokenEOF { + return "EOF" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil { + if t >= len(p.parser.GetLiteralNames()) { + fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ",")) + // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect + } else { + return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + } + + return strconv.Itoa(t) +} + +func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { + return p.GetTokenName(input.LA(1)) +} + +// Used for debugging in AdaptivePredict around execATN but I cut +// it out for clarity now that alg. works well. We can leave p +// "dead" code for a bit. +// +func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { + + panic("Not implemented") + + // fmt.Println("dead end configs: ") + // var decs = nvae.deadEndConfigs + // + // for i:=0; i0) { + // var t = c.state.GetTransitions()[0] + // if t2, ok := t.(*AtomTransition); ok { + // trans = "Atom "+ p.GetTokenName(t2.label) + // } else if t3, ok := t.(SetTransition); ok { + // _, ok := t.(*NotSetTransition) + // + // var s string + // if (ok){ + // s = "~" + // } + // + // trans = s + "Set " + t3.set + // } + // } + // fmt.Errorf(c.String(p.parser, true) + ":" + trans) + // } +} + +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { + return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) +} + +func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { + alt := ATNInvalidAltNumber + for _, c := range configs.GetItems() { + if alt == ATNInvalidAltNumber { + alt = c.GetAlt() // found first alt + } else if c.GetAlt() != alt { + return ATNInvalidAltNumber + } + } + return alt +} + +// +// Add an edge to the DFA, if possible. 
This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
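+//
+// Note that edges are indexed by {@code t+1}: symbol -1 (EOF) maps to slot
+// 0, which is why the edge table below is allocated with
+// {@code maxTokenType+1+1} entries.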
+// +// @param dfa The DFA +// @param from The source state for the edge +// @param t The input symbol +// @param to The target state for the edge +// +// @return If {@code to} is {@code nil}, p method returns {@code nil} +// otherwise p method returns the result of calling {@link //addDFAState} +// on {@code to} +// +func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { + if ParserATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) + } + if to == nil { + return nil + } + to = p.addDFAState(dfa, to) // used existing if possible not incoming + if from == nil || t < -1 || t > p.atn.maxTokenType { + return to + } + if from.edges == nil { + from.edges = make([]*DFAState, p.atn.maxTokenType+1+1) + } + from.edges[t+1] = to // connect + + if ParserATNSimulatorDebug { + var names []string + if p.parser != nil { + names = p.parser.GetLiteralNames() + } + + fmt.Println("DFA=\n" + dfa.String(names, nil)) + } + return to +} + +// +// Add state {@code D} to the DFA if it is not already present, and return +// the actual instance stored in the DFA. If a state equivalent to {@code D} +// is already in the DFA, the existing state is returned. Otherwise p +// method returns {@code D} after adding it to the DFA. +// +//

If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
+// does not change the DFA.
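+//
+// A hedged sketch of the dedupe behavior (assuming the NewDFAState
+// constructor from this runtime's dfa_state.go): states built from equal
+// configuration sets collapse to one stored instance, keyed by the
+// configuration-set hash:
+//
+//	s1 := p.addDFAState(dfa, NewDFAState(-1, configs))
+//	s2 := p.addDFAState(dfa, NewDFAState(-1, configs)) // equal configs
+//	// s1 == s2: the second call returned the existing state
+//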

+// +// @param dfa The dfa +// @param D The DFA state to add +// @return The state stored in the DFA. This will be either the existing +// state if {@code D} is already in the DFA, or {@code D} itself if the +// state was not already present. +// +func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { + if d == ATNSimulatorError { + return d + } + hash := d.hash() + existing, ok := dfa.getState(hash) + if ok { + return existing + } + d.stateNumber = dfa.numStates() + if !d.configs.ReadOnly() { + d.configs.OptimizeConfigs(p.BaseATNSimulator) + d.configs.SetReadOnly(true) + } + dfa.setState(hash, d) + if ParserATNSimulatorDebug { + fmt.Println("adding NewDFA state: " + d.String()) + } + return d +} + +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) + } +} + +// If context sensitive parsing, we know it's ambiguity not conflict// +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go new file mode 100644 index 00000000000..49cd10c5ffc --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go @@ -0,0 +1,362 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "reflect" + "strconv" +) + +type ParserRuleContext interface { + RuleContext + + SetException(RecognitionException) + + AddTokenNode(token Token) *TerminalNodeImpl + AddErrorNode(badToken Token) *ErrorNodeImpl + + EnterRule(listener ParseTreeListener) + ExitRule(listener ParseTreeListener) + + SetStart(Token) + GetStart() Token + + SetStop(Token) + GetStop() Token + + AddChild(child RuleContext) RuleContext + RemoveLastChild() +} + +type BaseParserRuleContext struct { + *BaseRuleContext + + start, stop Token + exception RecognitionException + children []Tree +} + +func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { + prc := new(BaseParserRuleContext) + + prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = -1 + // * If we are debugging or building a parse tree for a Visitor, + // we need to track all of the tokens and rule invocations associated + // with prc rule's context. This is empty for parsing w/o tree constr. + // operation because we don't the need to track the details about + // how we parse prc rule. + // / + prc.children = nil + prc.start = nil + prc.stop = nil + // The exception that forced prc rule to return. If the rule successfully + // completed, prc is {@code nil}. + prc.exception = nil + + return prc +} + +func (prc *BaseParserRuleContext) SetException(e RecognitionException) { + prc.exception = e +} + +func (prc *BaseParserRuleContext) GetChildren() []Tree { + return prc.children +} + +func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { + // from RuleContext + prc.parentCtx = ctx.parentCtx + prc.invokingState = ctx.invokingState + prc.children = nil + prc.start = ctx.start + prc.stop = ctx.stop +} + +func (prc *BaseParserRuleContext) GetText() string { + if prc.GetChildCount() == 0 { + return "" + } + + var s string + for _, child := range prc.children { + s += child.(ParseTree).GetText() + } + + return s +} + +// Double dispatch methods for listeners +func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +} + +func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +} + +// * Does not set parent link other add methods do that/// +func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +// * Used by EnterOuterAlt to toss out a RuleContext previously added as +// we entered a rule. If we have // label, we will need to remove +// generic ruleContext object. 
+// / +func (prc *BaseParserRuleContext) RemoveLastChild() { + if prc.children != nil && len(prc.children) > 0 { + prc.children = prc.children[0 : len(prc.children)-1] + } +} + +func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { + + node := NewTerminalNodeImpl(token) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node + +} + +func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { + node := NewErrorNodeImpl(badToken) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node +} + +func (prc *BaseParserRuleContext) GetChild(i int) Tree { + if prc.children != nil && len(prc.children) >= i { + return prc.children[i] + } + + return nil +} + +func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { + if childType == nil { + return prc.GetChild(i).(RuleContext) + } + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if reflect.TypeOf(child) == childType { + if i == 0 { + return child.(RuleContext) + } + + i-- + } + } + + return nil +} + +func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { + return TreesStringTree(prc, ruleNames, recog) +} + +func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { + return prc +} + +func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { + return visitor.VisitChildren(prc) +} + +func (prc *BaseParserRuleContext) SetStart(t Token) { + prc.start = t +} + +func (prc *BaseParserRuleContext) GetStart() Token { + return prc.start +} + +func (prc *BaseParserRuleContext) SetStop(t Token) { + prc.stop = t +} + +func (prc *BaseParserRuleContext) GetStop() Token { + return prc.stop +} + +func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if c2, ok := child.(TerminalNode); ok { + if c2.GetSymbol().GetTokenType() == ttype { + if i == 0 { + return c2 + } + + i-- + } + } + } + return nil +} + +func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { + if prc.children == nil { + return make([]TerminalNode, 0) + } + + tokens := make([]TerminalNode, 0) + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if tchild, ok := child.(TerminalNode); ok { + if tchild.GetSymbol().GetTokenType() == ttype { + tokens = append(tokens, tchild) + } + } + } + + return tokens +} + +func (prc *BaseParserRuleContext) GetPayload() interface{} { + return prc +} + +func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { + if prc.children == nil || i < 0 || i >= len(prc.children) { + return nil + } + + j := -1 // what element have we found with ctxType? 
+ for _, o := range prc.children { + + childType := reflect.TypeOf(o) + + if childType.Implements(ctxType) { + j++ + if j == i { + return o.(RuleContext) + } + } + } + return nil +} + +// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do +// check for convertibility + +func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { + return prc.getChild(ctxType, i) +} + +func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { + if prc.children == nil { + return make([]RuleContext, 0) + } + + contexts := make([]RuleContext, 0) + + for _, child := range prc.children { + childType := reflect.TypeOf(child) + + if childType.ConvertibleTo(ctxType) { + contexts = append(contexts, child.(RuleContext)) + } + } + return contexts +} + +func (prc *BaseParserRuleContext) GetChildCount() int { + if prc.children == nil { + return 0 + } + + return len(prc.children) +} + +func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { + if prc.start == nil || prc.stop == nil { + return TreeInvalidInterval + } + + return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) +} + +//need to manage circular dependencies, so export now + +// Print out a whole tree, not just a node, in LISP format +// (root child1 .. childN). Print just a node if b is a leaf. +// + +func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { + + var p ParserRuleContext = prc + s := "[" + for p != nil && p != stop { + if ruleNames == nil { + if !p.IsEmpty() { + s += strconv.Itoa(p.GetInvokingState()) + } + } else { + ri := p.GetRuleIndex() + var ruleName string + if ri >= 0 && ri < len(ruleNames) { + ruleName = ruleNames[ri] + } else { + ruleName = strconv.Itoa(ri) + } + s += ruleName + } + if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { + s += " " + } + pi := p.GetParent() + if pi != nil { + p = pi.(ParserRuleContext) + } else { + p = nil + } + } + s += "]" + return s +} + +var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) + +type InterpreterRuleContext interface { + ParserRuleContext +} + +type BaseInterpreterRuleContext struct { + *BaseParserRuleContext +} + +func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { + + prc := new(BaseInterpreterRuleContext) + + prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = ruleIndex + + return prc +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go new file mode 100644 index 00000000000..99acb333fa2 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go @@ -0,0 +1,756 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// Represents {@code $} in local context prediction, which means wildcard. +// {@code//+x =//}. +// / +const ( + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// Represents {@code $} in an array in full context mode, when {@code $} +// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, +// {@code $} = {@link //EmptyReturnState}. 
+// / + +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +type PredictionContext interface { + hash() int + GetParent(int) PredictionContext + getReturnState(int) int + equals(PredictionContext) bool + length() int + isEmpty() bool + hasEmptyPath() bool + String() string +} + +type BasePredictionContext struct { + cachedHash int +} + +func NewBasePredictionContext(cachedHash int) *BasePredictionContext { + pc := new(BasePredictionContext) + pc.cachedHash = cachedHash + + return pc +} + +func (b *BasePredictionContext) isEmpty() bool { + return false +} + +func calculateHash(parent PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +func calculateEmptyHash() int { + h := murmurInit(1) + return murmurFinish(h, 0) +} + +// Used to cache {@link BasePredictionContext} objects. Its used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. + +type PredictionContextCache struct { + cache map[PredictionContext]PredictionContext +} + +func NewPredictionContextCache() *PredictionContextCache { + t := new(PredictionContextCache) + t.cache = make(map[PredictionContext]PredictionContext) + return t +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a Newcontext to the cache. +// Protect shared cache from unsafe thread access. +// +func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { + if ctx == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY + } + existing := p.cache[ctx] + if existing != nil { + return existing + } + p.cache[ctx] = ctx + return ctx +} + +func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { + return p.cache[ctx] +} + +func (p *PredictionContextCache) length() int { + return len(p.cache) +} + +type SingletonPredictionContext interface { + PredictionContext +} + +type BaseSingletonPredictionContext struct { + *BasePredictionContext + + parentCtx PredictionContext + returnState int +} + +func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { + + s := new(BaseSingletonPredictionContext) + s.BasePredictionContext = NewBasePredictionContext(37) + + if parent != nil { + s.cachedHash = calculateHash(parent, returnState) + } else { + s.cachedHash = calculateEmptyHash() + } + + s.parentCtx = parent + s.returnState = returnState + + return s +} + +func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func (b *BaseSingletonPredictionContext) length() int { + return 1 +} + +func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { + return b.parentCtx +} + +func (b *BaseSingletonPredictionContext) getReturnState(index int) int { + return b.returnState +} + +func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { + return b.returnState == BasePredictionContextEmptyReturnState +} + +func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { + if b == other { + return true + } else if _, ok := 
other.(*BaseSingletonPredictionContext); !ok { + return false + } else if b.hash() != other.hash() { + return false // can't be same if hash is different + } + + otherP := other.(*BaseSingletonPredictionContext) + + if b.returnState != other.getReturnState(0) { + return false + } else if b.parentCtx == nil { + return otherP.parentCtx == nil + } + + return b.parentCtx.equals(otherP.parentCtx) +} + +func (b *BaseSingletonPredictionContext) hash() int { + h := murmurInit(1) + + if b.parentCtx == nil { + return murmurFinish(h, 0) + } + + h = murmurUpdate(h, b.parentCtx.hash()) + h = murmurUpdate(h, b.returnState) + return murmurFinish(h, 2) +} + +func (b *BaseSingletonPredictionContext) String() string { + var up string + + if b.parentCtx == nil { + up = "" + } else { + up = b.parentCtx.String() + } + + if len(up) == 0 { + if b.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(b.returnState) + } + + return strconv.Itoa(b.returnState) + " " + up +} + +var BasePredictionContextEMPTY = NewEmptyPredictionContext() + +type EmptyPredictionContext struct { + *BaseSingletonPredictionContext +} + +func NewEmptyPredictionContext() *EmptyPredictionContext { + + p := new(EmptyPredictionContext) + + p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) + + return p +} + +func (e *EmptyPredictionContext) isEmpty() bool { + return true +} + +func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { + return nil +} + +func (e *EmptyPredictionContext) getReturnState(index int) int { + return e.returnState +} + +func (e *EmptyPredictionContext) equals(other PredictionContext) bool { + return e == other +} + +func (e *EmptyPredictionContext) String() string { + return "$" +} + +type ArrayPredictionContext struct { + *BasePredictionContext + + parents []PredictionContext + returnStates []int +} + +func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
+ + c := new(ArrayPredictionContext) + c.BasePredictionContext = NewBasePredictionContext(37) + + for i := range parents { + c.cachedHash += calculateHash(parents[i], returnStates[i]) + } + + c.parents = parents + c.returnStates = returnStates + + return c +} + +func (a *ArrayPredictionContext) GetReturnStates() []int { + return a.returnStates +} + +func (a *ArrayPredictionContext) hasEmptyPath() bool { + return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) isEmpty() bool { + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return a.returnStates[0] == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) length() int { + return len(a.returnStates) +} + +func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { + return a.parents[index] +} + +func (a *ArrayPredictionContext) getReturnState(index int) int { + return a.returnStates[index] +} + +func (a *ArrayPredictionContext) equals(other PredictionContext) bool { + if _, ok := other.(*ArrayPredictionContext); !ok { + return false + } else if a.cachedHash != other.hash() { + return false // can't be same if hash is different + } else { + otherP := other.(*ArrayPredictionContext) + return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents + } +} + +func (a *ArrayPredictionContext) hash() int { + h := murmurInit(1) + + for _, p := range a.parents { + h = murmurUpdate(h, p.hash()) + } + + for _, r := range a.returnStates { + h = murmurUpdate(h, r) + } + + return murmurFinish(h, 2 * len(a.parents)) +} + +func (a *ArrayPredictionContext) String() string { + if a.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(a.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if a.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(a.returnStates[i]) + if a.parents[i] != nil { + s = s + " " + a.parents[i].String() + } else { + s = s + "nil" + } + } + + return s + "]" +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { + if outerContext == nil { + outerContext = RuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. 
(if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + // share same graph if both same + if a == b { + return a + } + + ac, ok1 := a.(*BaseSingletonPredictionContext) + bc, ok2 := b.(*BaseSingletonPredictionContext) + + if ok1 && ok2 { + return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as// wildcard + if rootIsWildcard { + if _, ok := a.(*EmptyPredictionContext); ok { + return a + } + if _, ok := b.(*EmptyPredictionContext); ok { + return b + } + } + // convert singleton so both are arrays to normalize + if _, ok := a.(*BaseSingletonPredictionContext); ok { + a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } + if _, ok := b.(*BaseSingletonPredictionContext); ok { + b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } + return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) +} + +// +// Merge two {@link SingletonBasePredictionContext} instances. +// +//

Stack tops equal, parents merge is same return left graph.
+//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
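+// A compact summary of the cases above (informal; pairs are
+// (parent, returnState)):
+//
+//	merge((px,3), (px,3)) -> (px,3)             // identical: reuse left
+//	merge((px,3), (py,3)) -> (merge(px,py), 3)  // same top, parents merged
+//	merge((px,3), (px,5)) -> (px, [3,5])        // same parent, array node
+//	merge((px,3), (py,5)) -> ([px,py], [3,5])   // all differ, array node
+//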

+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.hash(), b.hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.hash(), a.hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent == a.parentCtx { + return a // ax + bx = ax, if a=b + } + if parent == b.parentCtx { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array + // Newjoined parent so create Newsingleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent PredictionContext + if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), apc) + } + return apc +} + +// +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
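+// The cases above reduce to a small table (informal; {@code $} is
+// {@link //EMPTY}, {@code x} any non-empty graph):
+//
+//	rootIsWildcard=true:  $+x = $,     x+$ = $,     $+$ = $
+//	rootIsWildcard=false: $+x = [$,x], x+$ = [$,x], $+$ = $
+//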

+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { + if rootIsWildcard { + if a == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // // + b =// + } + if b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// +// Merge two {@link ArrayBasePredictionContext} instances. +// +//

Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
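+// Informal walk-through (pairs are (parent, returnState), lists sorted by
+// returnState):
+//
+//	mergeArrays([(px,3),(py,7)], [(px,3),(pz,9)])
+//	  -> [(px,3),(py,7),(pz,9)]  // shared top 3 merged once, rest copied
+//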

+// / +func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.hash(), b.hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.hash(), a.hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: track whether this is possible above during merge sort for speed + if M == a { + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), a) + } + return a + } + if M == b { + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), b) + } + return b + } + combineCommonParents(mergedParents) + + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), M) + } + return M +} + +// +// Make pass over all M {@code parents} merge any {@code equals()} +// ones. 
+// / +func combineCommonParents(parents []PredictionContext) { + uniqueParents := make(map[PredictionContext]PredictionContext) + + for p := 0; p < len(parents); p++ { + parent := parents[p] + if uniqueParents[parent] == nil { + uniqueParents[parent] = parent + } + } + for q := 0; q < len(parents); q++ { + parents[q] = uniqueParents[parents[q]] + } +} + +func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { + + if context.isEmpty() { + return context + } + existing := visited[context] + if existing != nil { + return existing + } + existing = contextCache.Get(context) + if existing != nil { + visited[context] = existing + return existing + } + changed := false + parents := make([]PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || parent != context.GetParent(i) { + if !changed { + parents = make([]PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited[context] = context + return context + } + var updated PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) + } + contextCache.add(updated) + visited[updated] = updated + visited[context] = updated + + return updated +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go new file mode 100644 index 00000000000..15718f912bc --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go @@ -0,0 +1,553 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // + // The SLL(*) prediction mode. This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + //

+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // {@link //LL} prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the {@link //SLL} prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful {@link //LL} prediction abilities to complete successfully.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
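+ //
+ // For illustration only (driver code, not part of this file; assumes the
+ // generated parser exposes its ATN simulator as Interpreter):
+ //
+ //	parser.Interpreter.SetPredictionMode(antlr.PredictionModeSLL)
+ //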

+ // + PredictionModeSLL = 0 + // + // The LL(*) prediction mode. This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + //

+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // Report a precise answer for exactly which alternatives are ambiguous.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.

+ // + PredictionModeLL = 1 + // + // The LL(*) prediction mode with exact ambiguity detection. In addition to + // the correctness guarantees provided by the {@link //LL} prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + //

+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
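+ //
+ // For illustration only (driver code, not part of this file): exact
+ // ambiguity reports are typically surfaced by combining this mode with
+ // the runtime's DiagnosticErrorListener:
+ //
+ //	parser.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)
+ //	parser.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // true: exact only
+ //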

+ // + PredictionModeLLExactAmbigDetection = 2 +) + +// +// Computes the SLL prediction termination condition. +// +//

+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//   - The usual SLL+LL fallback upon SLL conflict
+//   - Pure SLL without LL fallback
+//
+// COMBINED SLL+LL PARSING
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// HEURISTIC
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
+//
+// When the ATN simulation reaches the state before {@code ';'}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative two has another way to continue,
+// via {@code [6|2|[]]}.
+//
+// It also lets us continue for this rule:
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// PURE SLL PARSING
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// PREDICATES IN SLL+LL PARSING
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x''} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
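+//
+// For illustration only (driver code, not part of this file; parse() is a
+// hypothetical helper that invokes the generated entry rule): the usual
+// two-stage strategy tries SLL with a bail-out error handler and falls
+// back to full LL only when SLL fails:
+//
+//	parser.Interpreter.SetPredictionMode(antlr.PredictionModeSLL)
+//	parser.SetErrorHandler(antlr.NewBailErrorStrategy())
+//	tree, err := parse()
+//	if err != nil {
+//		tokens.Seek(0) // rewind the shared token stream
+//		parser.Interpreter.SetPredictionMode(antlr.PredictionModeLL)
+//		parser.SetErrorHandler(antlr.NewDefaultErrorStrategy())
+//		tree, err = parse()
+//	}
+//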

+// +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.HasSemanticContext() { + // dup configs, tossing out semantic predicates + dup := NewBaseATNConfigSet(false) + for _, c := range configs.GetItems() { + + // NewBaseATNConfig({semanticContext:}, c) + c = NewBaseATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar preds + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// Checks if any configuration in {@code configs} is in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if any configuration in {@code configs} is in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// Checks if all configurations in {@code configs} are in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if all configurations in {@code configs} are in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { + + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// +// Full LL prediction termination. +// +//

Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations {@code C} into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but a different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//	map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x,
+//	                                       // not alt and not pred
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity, but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// CONTINUE/STOP RULE
+//
+// Continue if the union of resolved alternative sets from non-conflicting
+// and conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// CASES
+//
+//   - no conflicts and more than 1 alternative in set => continue
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+//     {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+//     {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})}
+//     = {@code {1,3}} => continue
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+//     {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+//     {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})}
+//     = {@code {1}} => stop and predict 1
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+//     {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+//     {@code {1}} = {@code {1}} => stop and predict 1, can announce
+//     ambiguity {@code {1,2}}
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+//     {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+//     {@code {2}} = {@code {1,2}} => continue
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+//     {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+//     {@code {3}} = {@code {1,3}} => continue
+//
+// EXACT AMBIGUITY DETECTION
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set: {@code |A_i|>1} and
+// {@code A_i = A_j} for all {@code i, j}.
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three, so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
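+//
+// Informal illustration of the subset computation above: for configs
+// (s,1,x), (s,2,x), (s',2,y), (s',3,y) the conflicting alt subsets are
+// {1,2} and {2,3}; min({1,2}) U min({2,3}) = {1,2} still holds two
+// alternatives, so prediction must continue.
+//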

+// +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// +// Determines if every alternative subset in {@code altsets} contains more +// than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every {@link BitSet} in {@code altsets} has +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +// +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// +// Determines if any single alternative subset in {@code altsets} contains +// exactly one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} +// +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// +// Determines if any single alternative subset in {@code altsets} contains +// more than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +// +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// +// Determines if every alternative subset in {@code altsets} is equivalent. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every member of {@code altsets} is equal to the +// others, otherwise {@code false} +// +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// +// Returns the unique alternative predicted by all alternative subsets in +// {@code altsets}. If no such alternative exists, this method returns +// {@link ATN//INVALID_ALT_NUMBER}. +// +// @param altsets a collection of alternative subsets +// +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// Gets the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each {@link BitSet} +// in {@code altsets}. +// +// @param altsets a collection of alternative subsets +// @return the set of represented alternatives in {@code altsets} +// +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// +// This func gets the conflicting alt subsets from a configuration set. +// For each configuration {@code c} in {@code configs}: +// +//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+// 
+// +func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { + configToAlts := make(map[int]*BitSet) + + for _, c := range configs.GetItems() { + key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() + + alts, ok := configToAlts[key] + if !ok { + alts = NewBitSet() + configToAlts[key] = alts + } + alts.add(c.GetAlt()) + } + + values := make([]*BitSet, 0, 10) + for _, v := range configToAlts { + values = append(values, v) + } + return values +} + +// +// Get a map from state to alt subset from a configuration set. For each +// configuration {@code c} in {@code configs}: +// +//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+// 
+// +func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.GetItems() { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go new file mode 100644 index 00000000000..d114800f429 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go @@ -0,0 +1,217 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +var tokenTypeMapCache = make(map[string]int) +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.8" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} + +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result 
= tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// Get a map from rule names to rule indexes. +// +//
Used for XPath and tree pattern compilation.
+// +func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +func (b *BaseRecognizer) GetTokenType(tokenName string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// What is the error header, normally line/character position information?// +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// How should a token be displayed in an error message? The default +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +// +// @deprecated This method is not called by the ANTLR 4 Runtime. Specific +// implementations of {@link ANTLRErrorStrategy} may provide a similar +// feature when necessary. For example, see +// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
+//
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+		}
+	}
+	s = strings.Replace(s, "\t", "\\t", -1)
+	s = strings.Replace(s, "\n", "\\n", -1)
+	s = strings.Replace(s, "\r", "\\r", -1)
+
+	return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+	return NewProxyErrorListener(b.listeners)
+}
+
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
+	return true
+}
+
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
+	return true
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
new file mode 100644
index 00000000000..600cf8c0625
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with b context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked b. Contrast b with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+	RuleNode
+
+	GetInvokingState() int
+	SetInvokingState(int)
+
+	GetRuleIndex() int
+	IsEmpty() bool
+
+	GetAltNumber() int
+	SetAltNumber(altNumber int)
+
+	String([]string, RuleContext) string
+}
+
+type BaseRuleContext struct {
+	parentCtx     RuleContext
+	invokingState int
+	RuleIndex     int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+	rn := new(BaseRuleContext)
+
+	// What context invoked b rule?
+	rn.parentCtx = parent
+
+	// What state invoked the rule associated with b context?
+	// The "return address" is the followState of invokingState
+	// If parent is nil, b should be -1.
+ if parent == nil { + rn.invokingState = -1 + } else { + rn.invokingState = invokingState + } + + return rn +} + +func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { + return b +} + +func (b *BaseRuleContext) SetParent(v Tree) { + if v == nil { + b.parentCtx = nil + } else { + b.parentCtx = v.(RuleContext) + } +} + +func (b *BaseRuleContext) GetInvokingState() int { + return b.invokingState +} + +func (b *BaseRuleContext) SetInvokingState(t int) { + b.invokingState = t +} + +func (b *BaseRuleContext) GetRuleIndex() int { + return b.RuleIndex +} + +func (b *BaseRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (b *BaseRuleContext) SetAltNumber(altNumber int) {} + +// A context is empty if there is no invoking state meaning nobody call +// current context. +func (b *BaseRuleContext) IsEmpty() bool { + return b.invokingState == -1 +} + +// Return the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of b +// method. +// + +func (b *BaseRuleContext) GetParent() Tree { + return b.parentCtx +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go new file mode 100644 index 00000000000..49205a16240 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go @@ -0,0 +1,455 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A tree structure used to record the semantic context in which +// an ATN configuration is valid. It's either a single predicate, +// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// +//
I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
+// + +type SemanticContext interface { + comparable + + evaluate(parser Recognizer, outerContext RuleContext) bool + evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext + + hash() int + String() string +} + +func SemanticContextandContext(a, b SemanticContext) SemanticContext { + if a == nil || a == SemanticContextNone { + return b + } + if b == nil || b == SemanticContextNone { + return a + } + result := NewAND(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +func SemanticContextorContext(a, b SemanticContext) SemanticContext { + if a == nil { + return b + } + if b == nil { + return a + } + if a == SemanticContextNone || b == SemanticContextNone { + return SemanticContextNone + } + result := NewOR(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +type Predicate struct { + ruleIndex int + predIndex int + isCtxDependent bool +} + +func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { + p := new(Predicate) + + p.ruleIndex = ruleIndex + p.predIndex = predIndex + p.isCtxDependent = isCtxDependent // e.g., $i ref in pred + return p +} + +//The default {@link SemanticContext}, which is semantically equivalent to +//a predicate of the form {@code {true}?}. + +var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) + +func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + return p +} + +func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + + var localctx RuleContext + + if p.isCtxDependent { + localctx = outerContext + } + + return parser.Sempred(localctx, p.ruleIndex, p.predIndex) +} + +func (p *Predicate) equals(other interface{}) bool { + if p == other { + return true + } else if _, ok := other.(*Predicate); !ok { + return false + } else { + return p.ruleIndex == other.(*Predicate).ruleIndex && + p.predIndex == other.(*Predicate).predIndex && + p.isCtxDependent == other.(*Predicate).isCtxDependent + } +} + +func (p *Predicate) hash() int { + return p.ruleIndex*43 + p.predIndex*47 +} + +func (p *Predicate) String() string { + return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" +} + +type PrecedencePredicate struct { + precedence int +} + +func NewPrecedencePredicate(precedence int) *PrecedencePredicate { + + p := new(PrecedencePredicate) + p.precedence = precedence + + return p +} + +func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + return parser.Precpred(outerContext, p.precedence) +} + +func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + if parser.Precpred(outerContext, p.precedence) { + return SemanticContextNone + } + + return nil +} + +func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { + return p.precedence - other.precedence +} + +func (p *PrecedencePredicate) equals(other interface{}) bool { + if p == other { + return true + } else if _, ok := other.(*PrecedencePredicate); !ok { + return false + } else { + return p.precedence == other.(*PrecedencePredicate).precedence + } +} + +func (p *PrecedencePredicate) hash() int { + return p.precedence * 51 +} + +func (p *PrecedencePredicate) String() string { + return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
+} + +func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { + result := make([]*PrecedencePredicate, 0) + + for _, v := range set.values() { + if c2, ok := v.(*PrecedencePredicate); ok { + result = append(result, c2) + } + } + + return result +} + +// A semantic context which is true whenever none of the contained contexts +// is false.` + +type AND struct { + opnds []SemanticContext +} + +func NewAND(a, b SemanticContext) *AND { + + operands := NewSet(nil, nil) + if aa, ok := a.(*AND); ok { + for _, o := range aa.opnds { + operands.add(o) + } + } else { + operands.add(a) + } + + if ba, ok := b.(*AND); ok { + for _, o := range ba.opnds { + operands.add(o) + } + } else { + operands.add(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { + reduced = p + } + } + + operands.add(reduced) + } + + vs := operands.values() + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + and := new(AND) + and.opnds = opnds + + return and +} + +func (a *AND) equals(other interface{}) bool { + if a == other { + return true + } else if _, ok := other.(*AND); !ok { + return false + } else { + for i, v := range other.(*AND).opnds { + if !a.opnds[i].equals(v) { + return false + } + } + return true + } +} + +// +// {@inheritDoc} +// +//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+// +func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if !a.opnds[i].evaluate(parser, outerContext) { + return false + } + } + return true +} + +func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == nil { + // The AND context is false if any element is false + return nil + } else if evaluated != SemanticContextNone { + // Reduce the result by Skipping true elements + operands = append(operands, evaluated) + } + } + if !differs { + return a + } + + if len(operands) == 0 { + // all elements were true, so the AND context is true + return SemanticContextNone + } + + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextandContext(result, o) + } + } + + return result +} + +func (a *AND) hash() int { + h := murmurInit(37) // Init with a value different from OR + for _, op := range a.opnds { + h = murmurUpdate(h, op.hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *OR) hash() int { + h := murmurInit(41) // Init with a value different from AND + for _, op := range a.opnds { + h = murmurUpdate(h, op.hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *AND) String() string { + s := "" + + for _, o := range a.opnds { + s += "&& " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} + +// +// A semantic context which is true whenever at least one of the contained +// contexts is true. +// + +type OR struct { + opnds []SemanticContext +} + +func NewOR(a, b SemanticContext) *OR { + + operands := NewSet(nil, nil) + if aa, ok := a.(*OR); ok { + for _, o := range aa.opnds { + operands.add(o) + } + } else { + operands.add(a) + } + + if ba, ok := b.(*OR); ok { + for _, o := range ba.opnds { + operands.add(o) + } + } else { + operands.add(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { + reduced = p + } + } + + operands.add(reduced) + } + + vs := operands.values() + + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + o := new(OR) + o.opnds = opnds + + return o +} + +func (o *OR) equals(other interface{}) bool { + if o == other { + return true + } else if _, ok := other.(*OR); !ok { + return false + } else { + for i, v := range other.(*OR).opnds { + if !o.opnds[i].equals(v) { + return false + } + } + return true + } +} + +//
+// The evaluation of predicates by o context is short-circuiting, but
+// unordered.
+// +func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(o.opnds); i++ { + if o.opnds[i].evaluate(parser, outerContext) { + return true + } + } + return false +} + +func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + for i := 0; i < len(o.opnds); i++ { + context := o.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == SemanticContextNone { + // The OR context is true if any element is true + return SemanticContextNone + } else if evaluated != nil { + // Reduce the result by Skipping false elements + operands = append(operands, evaluated) + } + } + if !differs { + return o + } + if len(operands) == 0 { + // all elements were false, so the OR context is false + return nil + } + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextorContext(result, o) + } + } + + return result +} + +func (o *OR) String() string { + s := "" + + for _, o := range o.opnds { + s += "|| " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go new file mode 100644 index 00000000000..2d8e99095d3 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go @@ -0,0 +1,210 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. + +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // During lookahead operations, this "token" signifies we hit rule end ATN state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // All tokens go to the parser (unless Skip() is called in that rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + + TokenDefaultChannel = 0 + + // Anything on different channel than DEFAULT_CHANNEL is not parsed + // by parser. 
+ + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +type CommonToken struct { + *BaseToken +} + +func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { + + t := new(CommonToken) + + t.BaseToken = new(BaseToken) + + t.source = source + t.tokenType = tokenType + t.channel = channel + t.start = start + t.stop = stop + t.tokenIndex = -1 + if t.source.tokenSource != nil { + t.line = source.tokenSource.GetLine() + t.column = source.tokenSource.GetCharPositionInLine() + } else { + t.column = -1 + } + return t +} + +// An empty {@link Pair} which is used as the default value of +// {@link //source} for tokens that do not have a source. + +//CommonToken.EMPTY_SOURCE = [ nil, nil ] + +// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// +//
+// If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+//
+// @param oldToken The token to copy.
+//
+func (c *CommonToken) clone() *CommonToken {
+	t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+	t.tokenIndex = c.GetTokenIndex()
+	t.line = c.GetLine()
+	t.column = c.GetColumn()
+	t.text = c.GetText()
+	return t
+}
+
+func (c *CommonToken) GetText() string {
+	if c.text != "" {
+		return c.text
+	}
+	input := c.GetInputStream()
+	if input == nil {
+		return ""
+	}
+	n := input.Size()
+	if c.start < n && c.stop < n {
+		return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+	}
+	return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+	c.text = text
+}
+
+func (c *CommonToken) String() string {
+	txt := c.GetText()
+	if txt != "" {
+		txt = strings.Replace(txt, "\n", "\\n", -1)
+		txt = strings.Replace(txt, "\r", "\\r", -1)
+		txt = strings.Replace(txt, "\t", "\\t", -1)
+	} else {
+		txt = "<no text>"
+	}
+
+	var ch string
+	if c.channel > 0 {
+		ch = ",channel=" + strconv.Itoa(c.channel)
+	} else {
+		ch = ""
+	}
+
+	return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+		txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+		ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
new file mode 100644
index 00000000000..e023978fef4
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+	NextToken() Token
+	Skip()
+	More()
+	GetLine() int
+	GetCharPositionInLine() int
+	GetInputStream() CharStream
+	GetSourceName() string
+	setTokenFactory(factory TokenFactory)
+	GetTokenFactory() TokenFactory
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
new file mode 100644
index 00000000000..df92c814789
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+	IntStream
+
+	LT(k int) Token
+
+	Get(index int) Token
+	GetTokenSource() TokenSource
+	SetTokenSource(TokenSource)
+
+	GetAllText() string
+	GetTextFromInterval(*Interval) string
+	GetTextFromRuleContext(RuleContext) string
+	GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
new file mode 100644
index 00000000000..96a03f02aa6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
@@ -0,0 +1,649 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+package antlr
+
+import (
+"bytes"
+"fmt"
+)
+
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
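The examples in the comment above are Java. As a rough, hedged Go sketch of the same flow using only identifiers defined in this vendored runtime (NewTLexer stands in for a generated lexer constructor and is hypothetical, not part of this patch):

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// Build a buffered token stream over some input.
	input := antlr.NewInputStream("some input")
	lex := NewTLexer(input) // hypothetical generated lexer, not defined in this patch
	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
	tokens.Fill()

	// Queue edits against the default program; the token stream itself is
	// never modified, instructions are applied lazily at GetText time.
	rewriter := antlr.NewTokenStreamRewriter(tokens)
	t := tokens.Get(0)
	rewriter.InsertAfterToken(antlr.Default_Program_Name, t, "text to put after t")
	fmt.Println(rewriter.GetTextDefault())
}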
+ + + +const( + Default_Program_Name = "default" + Program_Init_Size = 100 + Min_Token_Index = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instruction_index int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + op_name string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation)GetInstructionIndex() int{ + return op.instruction_index +} + +func (op *BaseRewriteOperation)GetIndex() int{ + return op.index +} + +func (op *BaseRewriteOperation)GetText() string{ + return op.text +} + +func (op *BaseRewriteOperation)GetOpName() string{ + return op.op_name +} + +func (op *BaseRewriteOperation)GetTokens() TokenStream{ + return op.tokens +} + +func (op *BaseRewriteOperation)SetInstructionIndex(val int){ + op.instruction_index = val +} + +func (op *BaseRewriteOperation)SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation)SetText(val string){ + op.text = val +} + +func (op *BaseRewriteOperation)SetOpName(val string){ + op.op_name = val +} + +func (op *BaseRewriteOperation)SetTokens(val TokenStream) { + op.tokens = val +} + + +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.op_name, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ + return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index, + text:text, + op_name:"InsertBeforeOp", + tokens:stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// Distinguish between insert after/before to do the "insert afters" +// first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". + +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ + return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index+1, + text:text, + tokens:stream, + }} +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct{
+	BaseRewriteOperation
+	LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+	return &ReplaceOp{
+		BaseRewriteOperation:BaseRewriteOperation{
+			index:from,
+			text:text,
+			op_name:"ReplaceOp",
+			tokens:stream,
+		},
+		LastIndex:to,
+	}
+}
+
+func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
+	if op.text != ""{
+		buffer.WriteString(op.text)
+	}
+	return op.LastIndex +1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%v\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+
+type TokenStreamRewriter struct {
+	//Our source stream
+	tokens TokenStream
+	// You may have multiple, named streams of rewrite operations.
+	// I'm calling these things "programs."
+	// Maps String (name) → rewrite (List)
+	programs map[string][]RewriteOperation
+	last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+	return &TokenStreamRewriter{
+		tokens: tokens,
+		programs: map[string][]RewriteOperation{
+			Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+		},
+		last_rewrite_token_indexes: map[string]int{},
+	}
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+	return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
+	is, ok := tsr.programs[program_name]
+	if ok{
+		tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+	}
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+	tsr.Rollback(Default_Program_Name, instruction_index)
+}
+//Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+	tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+	tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+	// to insert after, just insert before next index (even if past end)
+	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(program_name)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+	tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+	tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(program_name)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+	tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+	tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
+	if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+		panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+			from, to, tsr.tokens.Size()))
+	}
+	var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+	rewrites := tsr.GetProgram(program_name)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+	tsr.Replace(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+	tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+	tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+	tsr.ReplaceToken(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+	tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
+	tsr.Replace(program_name, from, to, "" )
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+	tsr.Delete(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
+	tsr.DeleteDefault(index,index)
+}
+
+func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+	tsr.ReplaceToken(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+	tsr.DeleteToken(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+	i, ok := tsr.last_rewrite_token_indexes[program_name]
+	if !ok{
+		return -1
+	}
+	return i
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+	return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+	tsr.last_rewrite_token_indexes[program_name] = i
+}
+
+func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+	is := make([]RewriteOperation, 0, Program_Init_Size)
+	tsr.programs[name] = is
+	return is
+}
+
+func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+	is := tsr.GetProgram(name)
+	is = append(is, op)
+	tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+	is, ok := tsr.programs[name]
+	if !ok{
+		is = tsr.InitializeProgram(name)
+	}
+	return is
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetTextDefault() string{
+	return tsr.GetText(
+		Default_Program_Name,
+		NewInterval(0, tsr.tokens.Size()-1))
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+	rewrites := tsr.programs[program_name]
+	start := interval.Start
+	stop := interval.Stop
+	// ensure start/end are in range
+	stop = min(stop, tsr.tokens.Size()-1)
+	start = max(start,0)
+	if rewrites == nil || len(rewrites) == 0{
+		return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+	}
+	buf := bytes.Buffer{}
+	// First, optimize instruction stream
+	indexToOp := reduceToSingleOperationPerIndex(rewrites)
+	// Walk buffer, executing instructions and emitting tokens
+	for i:=start; i<=stop && i<tsr.tokens.Size();{
+		op := indexToOp[i]
+		delete(indexToOp, i)// remove so any left have index size-1
+		t := tsr.tokens.Get(i)
+		if op == nil{
+			// no operation at that index, just dump token
+			if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
+			i++ // move to next token
+		}else {
+			i = op.Execute(&buf)// execute operation and skip
+		}
+	}
+	// include stuff after end if it's last index in buffer
+	// So, if they did an insertAfter(lastValidIndex, "foo"), include
+	// foo if end==lastValidIndex.
+	if stop == tsr.tokens.Size()-1{
+		// Scan any remaining operations after last token
+		// should be included (they will be inserts).
+		for _, op := range indexToOp{
+			if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+		}
+	}
+	return buf.String()
+}
+
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to
+// same index need to be combined etc... Here are the cases:
+//
+// I.i.u I.j.v   leave alone, nonoverlapping
+// I.i.u I.i.v   combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y          delete first R
+// R.i-j.u R.i-j.v                       delete first R
+// R.i-j.u R.x-y.v | x-y in i-j          ERROR
+// R.i-j.u R.x-y.v | boundaries overlap  ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before
+//                                       we're not deleting i)
+// I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
+// R.x-y.v I.i.u   | i in x-y            ERROR
+// R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 		1. wipe out any insertions before op within that range.
+//		2. Drop any replace op before that is contained completely within
+//	 that range.
+//		3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 		1. for any inserts to same index, combine even if not adjacent.
+// 		2. for any prior replace with same left boundary, combine this
+//	 insert with replace and delete this replace.
+//		3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// Return a map from token index to operation.
+//
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+	// WALK REPLACES
+	for i:=0; i < len(rewrites); i++{
+		op := rewrites[i]
+		if op == nil{continue}
+		rop, ok := op.(*ReplaceOp)
+		if !ok{continue}
+		// Wipe prior inserts within range
+		for j:=0; j<i && j < len(rewrites); j++{
+			if iop, ok := rewrites[j].(*InsertBeforeOp); ok{
+				if iop.index == rop.index{
+					// E.g., insert before 2, delete 2..2; update replace
+					// text to include insert before, kill insert
+					rewrites[iop.instruction_index] = nil
+					if rop.text != ""{
+						rop.text = iop.text + rop.text
+					}else{
+						rop.text = iop.text
+					}
+				}else if iop.index > rop.index && iop.index <=rop.LastIndex{
+					// delete insert as it's a no-op.
+					rewrites[iop.instruction_index] = nil
+				}
+			}
+		}
+		// Drop any prior replaces contained within
+		for j:=0; j<i && j < len(rewrites); j++{
+			if prevop, ok := rewrites[j].(*ReplaceOp); ok{
+				if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
+					// delete replace as it's a no-op.
+					rewrites[prevop.instruction_index] = nil
+					continue
+				}
+				// throw exception unless disjoint or identical
+				disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+				// Delete special case of replace (text==null):
+				// D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
+				if prevop.text == "" && rop.text == "" && !disjoint{
+					rewrites[prevop.instruction_index] = nil
+					rop.index = min(prevop.index, rop.index)
+					rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+					println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
+				}else if !disjoint{
+					panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+				}
+			}
+		}
+	}
+	// WALK INSERTS
+	for i:=0; i < len(rewrites); i++ {
+		op := rewrites[i]
+		if op == nil{continue}
+		//hack to replicate inheritance in composition
+		_, iok := rewrites[i].(*InsertBeforeOp)
+		_, aok := rewrites[i].(*InsertAfterOp)
+		if !iok && !aok{continue}
+		iop := rewrites[i]
+		// combine current insert with prior if any at same index
+		// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+		for j:=0; j<i && j < len(rewrites); j++{
+			if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
+				if nextIop.index == iop.GetIndex(){
+					iop.SetText(nextIop.text + iop.GetText())
+					rewrites[j] = nil
+				}
+			}
+			if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
+				if prevIop.index == iop.GetIndex(){
+					iop.SetText(iop.GetText() + prevIop.text)
+					rewrites[prevIop.instruction_index] = nil
+				}
+			}
+		}
+		// look for replaces where iop.index is in range; error
+		for j:=0; j<i && j < len(rewrites); j++{
+			rop, ok := rewrites[j].(*ReplaceOp)
+			if !ok{continue}
+			if iop.GetIndex() == rop.index{
+				rop.SetText(iop.GetText() + rop.text)
+				rewrites[i] = nil
+				continue
+			}
+			if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
+				panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+			}
+		}
+	}
+	m := map[int]RewriteOperation{}
+	for i:=0; i < len(rewrites); i++{
+		op := rewrites[i]
+		if op == nil {continue}
+		if _, ok := m[op.GetIndex()]; ok{
+			panic("should only be one op per index")
+		}
+		m[op.GetIndex()] = op
+	}
+	return m
+}
+
+
+/*
+	Quick fixing Go lack of overloads
+ */
+
+func max(a,b int)int{
+	if a>b{
+		return a
+	}else {
+		return b
+	}
+}
+func min(a,b int)int{
+	if a<b{
+		return a
+	}else {
+		return b
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
@@ -0,0 +1,421 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// An ATN transition between any two ATN states. Subclasses define
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
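As a hedged sketch of the Matches contract described above, using the concrete transition types declared below (the target state here is a bare placeholder; labels are token types or code points):

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// Placeholder target; real transitions point at states inside an ATN.
	target := antlr.NewBaseATNState()

	// An atom transition is labeled with a single token type / code point.
	atom := antlr.NewAtomTransition(target, 5)
	fmt.Println(atom.Matches(5, 0, 100)) // true: the symbol equals the label

	// A range transition matches an inclusive range of code points.
	rng := antlr.NewRangeTransition(target, 'a', 'z')
	fmt.Println(rng.Matches('q', 0, 0x10FFFF)) // true: 'q' falls within 'a'..'z'
}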
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// TODO: make all transitions sets? no, should remove set edges +type AtomTransition struct { + *BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + + t := new(AtomTransition) + t.BaseTransition = NewBaseTransition(target) + + t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
+ t.intervalSet = t.makeLabel() + t.serializationType = TransitionATOM + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + *BaseTransition + + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + + t := new(RuleTransition) + t.BaseTransition = NewBaseTransition(ruleStart) + + t.ruleIndex = ruleIndex + t.precedence = precedence + t.followState = followState + t.serializationType = TransitionRULE + t.isEpsilon = true + + return t +} + +func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +type EpsilonTransition struct { + *BaseTransition + + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + + t := new(EpsilonTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionEPSILON + t.isEpsilon = true + t.outermostPrecedenceReturn = outermostPrecedenceReturn + return t +} + +func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + *BaseTransition + + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + + t := new(RangeTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionRANGE + t.start = start + t.stop = stop + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + return "'" + string(t.start) + "'..'" + string(t.stop) + "'" +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + *BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + + t := new(BaseAbstractPredicateTransition) + t.BaseTransition = NewBaseTransition(target) + + return t +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + *BaseAbstractPredicateTransition + + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + + t := new(PredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPREDICATE + t.ruleIndex = ruleIndex + t.predIndex = predIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string 
{ + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + *BaseTransition + + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + + t := new(ActionTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionACTION + t.ruleIndex = ruleIndex + t.actionIndex = actionIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + *BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + + t := new(SetTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionSET + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + *SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + + t := new(NotSetTransition) + + t.SetTransition = NewSetTransition(target, set) + + t.serializationType = TransitionNOTSET + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + *BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + + t := new(WildcardTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionWILDCARD + return t +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + *BaseAbstractPredicateTransition + + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + + t := new(PrecedencePredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPRECEDENCE + t.precedence = precedence + t.isEpsilon = true + + return t +} + +func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go new file mode 100644 index 00000000000..ad0eabf005d --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go @@ -0,0 +1,251 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + + GetSourceInterval() *Interval +} + +type ParseTree interface { + SyntaxTree + + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + + GetRuleContext() RuleContext + GetBaseRuleContext() *BaseRuleContext +} + +type TerminalNode interface { + ParseTree + + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } + +// TODO +//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx ParserRuleContext) +} + +type BaseParseTreeListener struct{} + +var _ ParseTreeListener = &BaseParseTreeListener{} + +func 
(l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+	parentCtx RuleContext
+
+	symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+	tn := new(TerminalNodeImpl)
+
+	tn.parentCtx = nil
+	tn.symbol = symbol
+
+	return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
+	panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+	return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+	t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
+	if t.symbol == nil {
+		return TreeInvalidInterval
+	}
+	tokenIndex := t.symbol.GetTokenIndex()
+	return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+	return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+	if t.symbol.GetTokenType() == TokenEOF {
+		return "<EOF>"
+	}
+
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
+	return t.String()
+}
+
+// Represents a token that was consumed during reSynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+	*TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+	en := new(ErrorNodeImpl)
+	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+	return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+	return new(ParseTreeWalker)
+}
+
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+	switch tt := t.(type) {
+	case ErrorNode:
+		listener.VisitErrorNode(tt)
+	case TerminalNode:
+		listener.VisitTerminal(tt)
+	default:
+		p.EnterRule(listener, t.(RuleNode))
+		for i := 0; i < t.GetChildCount(); i++ {
+			child := t.GetChild(i)
+			p.Walk(listener, child)
+		}
+		p.ExitRule(listener, t.(RuleNode))
+	}
+}
+
+//
+// The discovery of a rule node, involves sending two events: the generic
+// {@link ParseTreeListener//EnterEveryRule} and a
+// {@link RuleContext}-specific event. First we trigger the generic and then
+// the rule specific. We do them in reverse order upon finishing the node.
+//
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+	ctx := r.GetRuleContext().(ParserRuleContext)
+	listener.EnterEveryRule(ctx)
+	ctx.EnterRule(listener)
+}
+
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+	ctx := r.GetRuleContext().(ParserRuleContext)
+	ctx.ExitRule(listener)
+	listener.ExitEveryRule(ctx)
+}
+
+var ParseTreeWalkerDefault = NewParseTreeWalker()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
new file mode 100644
index 00000000000..80144ecadea
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+
+// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+// node payloads to get the text for the nodes. Detect
+// parse trees and extract data appropriately.
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
+
+	if recog != nil {
+		ruleNames = recog.GetRuleNames()
+	}
+
+	s := TreesGetNodeText(tree, ruleNames, nil)
+
+	s = EscapeWhitespace(s, false)
+	c := tree.GetChildCount()
+	if c == 0 {
+		return s
+	}
+	res := "(" + s + " "
+	if c > 0 {
+		s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
+		res += s
+	}
+	for i := 1; i < c; i++ {
+		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
+		res += (" " + s)
+	}
+	res += ")"
+	return res
+}
+
+func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
+	if recog != nil {
+		ruleNames = recog.GetRuleNames()
+	}
+
+	if ruleNames != nil {
+		switch t2 := t.(type) {
+		case RuleNode:
+			t3 := t2.GetRuleContext()
+			altNumber := t3.GetAltNumber()
+
+			if altNumber != ATNInvalidAltNumber {
+				return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
+			}
+			return ruleNames[t3.GetRuleIndex()]
+		case ErrorNode:
+			return fmt.Sprint(t2)
+		case TerminalNode:
+			if t2.GetSymbol() != nil {
+				return t2.GetSymbol().GetText()
+			}
+		}
+	}
+
+	// no recog for rule names
+	payload := t.GetPayload()
+	if p2, ok := payload.(Token); ok {
+		return p2.GetText()
+	}
+
+	return fmt.Sprint(t.GetPayload())
+}
+
+// Return ordered list of all children of this node
+func TreesGetChildren(t Tree) []Tree {
+	list := make([]Tree, 0)
+	for i := 0; i < t.GetChildCount(); i++ {
+		list = append(list, t.GetChild(i))
+	}
+	return list
+}
+
+// Return a list of all ancestors of this node. The first node of the
+// list is the root and the last is the parent of this node.
+//
+func TreesgetAncestors(t Tree) []Tree {
+	ancestors := make([]Tree, 0)
+	t = t.GetParent()
+	for t != nil {
+		f := []Tree{t}
+		ancestors = append(f, ancestors...)
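+		// ascend: nodes nearer the root are prepended later, leaving the
+		// root at index 0 when the loop ends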
+ t = t.GetParent() + } + return ancestors +} + +func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { + return TreesfindAllNodes(t, ruleIndex, false) +} + +func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { + nodes := make([]ParseTree, 0) + treesFindAllNodes(t, index, findTokens, &nodes) + return nodes +} + +func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { + // check this node (the root) first + + t2, ok := t.(TerminalNode) + t3, ok2 := t.(ParserRuleContext) + + if findTokens && ok { + if t2.GetSymbol().GetTokenType() == index { + *nodes = append(*nodes, t2) + } + } else if !findTokens && ok2 { + if t3.GetRuleIndex() == index { + *nodes = append(*nodes, t3) + } + } + // check children + for i := 0; i < t.GetChildCount(); i++ { + treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) + } +} + +func TreesDescendants(t ParseTree) []ParseTree { + nodes := []ParseTree{t} + for i := 0; i < t.GetChildCount(); i++ { + nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) + } + return nodes +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go new file mode 100644 index 00000000000..bba2ffae793 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go @@ -0,0 +1,417 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "errors" + "fmt" + "sort" + "strconv" + "strings" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("Stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +type Set struct { + data map[int][]interface{} + hashcodeFunction func(interface{}) int + equalsFunction func(interface{}, interface{}) bool +} + +func NewSet( + hashcodeFunction func(interface{}) int, + equalsFunction func(interface{}, interface{}) bool) *Set { + + s := new(Set) + + s.data = make(map[int][]interface{}) + + if hashcodeFunction != nil { + s.hashcodeFunction = hashcodeFunction + } else { + s.hashcodeFunction = standardHashFunction + } + + if equalsFunction == nil { + s.equalsFunction = standardEqualsFunction + } else { + s.equalsFunction = equalsFunction + } + + return s +} + +func standardEqualsFunction(a interface{}, b interface{}) bool { + + ac, oka := a.(comparable) + bc, okb := b.(comparable) + + if !oka || !okb { + panic("Not Comparable") + } + + return ac.equals(bc) +} + +func standardHashFunction(a interface{}) int { + if h, ok := a.(hasher); ok { + return h.hash() + } + + panic("Not Hasher") +} + +type hasher interface { + hash() int +} + +func (s *Set) length() int { + return len(s.data) +} + +func (s *Set) add(value interface{}) interface{} { + + key := s.hashcodeFunction(value) + + values := s.data[key] + + if s.data[key] != nil { + for i := 0; i < len(values); i++ { + if s.equalsFunction(value, values[i]) { + return values[i] + } + } + + 
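+		// value hashes to an existing bucket but compares equal to none of
+		// its entries, so append it to that bucket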
s.data[key] = append(s.data[key], value) + return value + } + + v := make([]interface{}, 1, 10) + v[0] = value + s.data[key] = v + + return value +} + +func (s *Set) contains(value interface{}) bool { + + key := s.hashcodeFunction(value) + + values := s.data[key] + + if s.data[key] != nil { + for i := 0; i < len(values); i++ { + if s.equalsFunction(value, values[i]) { + return true + } + } + } + return false +} + +func (s *Set) values() []interface{} { + var l []interface{} + + for _, v := range s.data { + l = append(l, v...) + } + + return l +} + +func (s *Set) String() string { + r := "" + + for _, av := range s.data { + for _, v := range av { + r += fmt.Sprint(v) + } + } + + return r +} + +type BitSet struct { + data map[int]bool +} + +func NewBitSet() *BitSet { + b := new(BitSet) + b.data = make(map[int]bool) + return b +} + +func (b *BitSet) add(value int) { + b.data[value] = true +} + +func (b *BitSet) clear(index int) { + delete(b.data, index) +} + +func (b *BitSet) or(set *BitSet) { + for k := range set.data { + b.add(k) + } +} + +func (b *BitSet) remove(value int) { + delete(b.data, value) +} + +func (b *BitSet) contains(value int) bool { + return b.data[value] +} + +func (b *BitSet) values() []int { + ks := make([]int, len(b.data)) + i := 0 + for k := range b.data { + ks[i] = k + i++ + } + sort.Ints(ks) + return ks +} + +func (b *BitSet) minValue() int { + min := 2147483647 + + for k := range b.data { + if k < min { + min = k + } + } + + return min +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if len(b.data) != len(otherBitSet.data) { + return false + } + + for k, v := range b.data { + if otherBitSet.data[k] != v { + return false + } + } + + return true +} + +func (b *BitSet) length() int { + return len(b.data) +} + +func (b *BitSet) String() string { + vals := b.values() + valsS := make([]string, len(vals)) + + for i, val := range vals { + valsS[i] = strconv.Itoa(val) + } + return "{" + strings.Join(valsS, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +type DoubleDict struct { + data map[int]map[int]interface{} +} + +func NewDoubleDict() *DoubleDict { + dd := new(DoubleDict) + dd.data = make(map[int]map[int]interface{}) + return dd +} + +func (d *DoubleDict) Get(a, b int) interface{} { + data := d.data[a] + + if data == nil { + return nil + } + + return data[b] +} + +func (d *DoubleDict) set(a, b int, o interface{}) { + data := d.data[a] + + if data == nil { + data = make(map[int]interface{}) + d.data[a] = data + } + + data[b] = o +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +func PrintArrayJavaStyle(sa []string) 
string {
+	var buffer bytes.Buffer
+
+	buffer.WriteString("[")
+
+	for i, s := range sa {
+		buffer.WriteString(s)
+		if i != len(sa)-1 {
+			buffer.WriteString(", ")
+		}
+	}
+
+	buffer.WriteString("]")
+
+	return buffer.String()
+}
+
+// The following routines were lifted from bits.rotate* available in Go 1.9.
+
+const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64
+
+// rotateLeft returns the value of x rotated left by (k mod UintSize) bits.
+// To rotate x right by k bits, call RotateLeft(x, -k).
+func rotateLeft(x uint, k int) uint {
+	if uintSize == 32 {
+		return uint(rotateLeft32(uint32(x), k))
+	}
+	return uint(rotateLeft64(uint64(x), k))
+}
+
+// rotateLeft32 returns the value of x rotated left by (k mod 32) bits.
+func rotateLeft32(x uint32, k int) uint32 {
+	const n = 32
+	s := uint(k) & (n - 1)
+	return x<<s | x>>(n-s)
+}
+
+// rotateLeft64 returns the value of x rotated left by (k mod 64) bits.
+func rotateLeft64(x uint64, k int) uint64 {
+	const n = 64
+	s := uint(k) & (n - 1)
+	return x<<s | x>>(n-s)
+}
+
+
+// murmur hash
+const (
+	c1_32 uint = 0xCC9E2D51
+	c2_32 uint = 0x1B873593
+	n1_32 uint = 0xE6546B64
+)
+
+func murmurInit(seed int) int {
+	return seed
+}
+
+func murmurUpdate(h1 int, k1 int) int {
+	var k1u uint
+	k1u = uint(k1) * c1_32
+	k1u = rotateLeft(k1u, 15)
+	k1u *= c2_32
+
+	var h1u = uint(h1) ^ k1u
+	h1u = rotateLeft(h1u, 13)
+	h1u = h1u*5 + n1_32
+	return int(h1u)
+}
+
+func murmurFinish(h1 int, numberOfWords int) int {
+	var h1u uint = uint(h1)
+	h1u ^= uint(numberOfWords * 4)
+	h1u ^= h1u >> 16
+	h1u *= uint(0x85ebca6b)
+	h1u ^= h1u >> 13
+	h1u *= 0xc2b2ae35
+	h1u ^= h1u >> 16
+
+	return int(h1u)
+}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 00000000000..c33dcc7c928
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+   means
+
+   a. that the initial Contributor has attached the notice described in
+      Exhibit B to the Covered Software; or
+
+   b. that the Covered Software was made available under the terms of version
+      1.1 or earlier of the License, but not also under the terms of a
+      Secondary License.
+
+1.6. “Executable Form”
+
+   means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+   means a work that combines Covered Software with other material, in a separate
+   file or files, that is not Covered Software.
+
+1.8. “License”
+
+   means this document.
+
+1.9. “Licensable”
+
+   means having the right to grant, to the maximum extent possible, whether at the
+   time of the initial grant or subsequently, any and all of the rights conveyed by
+   this License.
+
+1.10. “Modifications”
+
+   means any of the following:
+
+   a.
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+   This Source Code Form is “Incompatible
+   With Secondary Licenses”, as defined by
+   the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 00000000000..1c95f59782b
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but it requires you to know the entire chain of
+possible rewrappings when you might care about only one of them.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "does not exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error. This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 00000000000..a733bef18c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+func Wrapf(format string, err error) error {
+	outerMsg := "<nil>"
+	if err != nil {
+		outerMsg = err.Error()
+	}
+
+	outer := errors.New(strings.Replace(
+		format, "{{err}}", outerMsg, -1))
+
+	return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+	return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+	return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+	es := GetAll(err, msg)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+	es := GetAllType(err, v)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
+func GetAll(err error, msg string) []error {
+	var result []error
+
+	Walk(err, func(err error) {
+		if err.Error() == msg {
+			result = append(result, err)
+		}
+	})
+
+	return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+	var result []error
+
+	var search string
+	if v != nil {
+		search = reflect.TypeOf(v).String()
+	}
+	Walk(err, func(err error) {
+		var needle string
+		if err != nil {
+			needle = reflect.TypeOf(err).String()
+		}
+
+		if needle == search {
+			result = append(result, err)
+		}
+	})
+
+	return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback.
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 00000000000..4b865d194a6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 00000000000..b97cd6ed02b --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 00000000000..ead5830f7b7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,97 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... 
accumulate errors here, maybe using Append
+
+if result != nil {
+	result.ErrorFormat = func([]error) string {
+		return "errors!"
+	}
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error` so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use a type assertion to access the list of errors:
+
+```go
+if err := something(); err != nil {
+	if merr, ok := err.(*multierror.Error); ok {
+		// Use merr.Errors
+	}
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` function
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 00000000000..775b6e753e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,41 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Go through each error and flatten
+		for _, e := range errs {
+			switch e := e.(type) {
+			case *Error:
+				if e != nil {
+					err.Errors = append(err.Errors, e.Errors...)
+				}
+			default:
+				if e != nil {
+					err.Errors = append(err.Errors, e)
+				}
+			}
+		}
+
+		return err
+	default:
+		newErrs := make([]error, 0, len(errs)+1)
+		if err != nil {
+			newErrs = append(newErrs, err)
+		}
+		newErrs = append(newErrs, errs...)
+
+		return Append(&Error{}, newErrs...)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 00000000000..aab8e9abec9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+	// If it isn't an *Error, just return the error as-is
+	if _, ok := err.(*Error); !ok {
+		return err
+	}
+
+	// Otherwise, make the result and flatten away!
+	flatErr := new(Error)
+	flatten(err, flatErr)
+	return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+	switch err := err.(type) {
+	case *Error:
+		for _, e := range err.Errors {
+			flatten(e, flatErr)
+		}
+	default:
+		flatErr.Errors = append(flatErr.Errors, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 00000000000..6c7a3cc91de
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
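+//
+// For example, a caller could install a custom formatter like this (an
+// illustrative sketch; merr is assumed to be a *Error built elsewhere):
+//
+//	merr.ErrorFormat = func(errs []error) string {
+//		return fmt.Sprintf("%d problem(s) occurred", len(errs))
+//	}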
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+	if len(es) == 1 {
+		return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\n%s",
+		len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 00000000000..2ea08273290
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+	"fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors and return them as a single "error" value.
+type Error struct {
+	Errors      []error
+	ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+	fn := e.ErrorFormat
+	if fn == nil {
+		fn = ListFormatFunc
+	}
+
+	return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+	if e == nil {
+		return nil
+	}
+	if len(e.Errors) == 0 {
+		return nil
+	}
+
+	return e
+}
+
+func (e *Error) GoString() string {
+	return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+	return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 00000000000..5c477abe44f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful when appending multiple multierrors
+// together in order to give better scoping.
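
A short sketch of how `ErrorFormat` and `ErrorOrNil` fit together; the comma formatter below is illustrative, not part of the library:

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/hashicorp/go-multierror"
)

// commaFormat is a custom ErrorFormatFunc that joins all errors on one line.
func commaFormat(es []error) string {
	msgs := make([]string, len(es))
	for i, err := range es {
		msgs[i] = err.Error()
	}
	return fmt.Sprintf("%d errors: %s", len(es), strings.Join(msgs, ", "))
}

func main() {
	var result *multierror.Error

	// ErrorOrNil on a nil (or empty) *Error yields a nil error interface,
	// which avoids the classic typed-nil pitfall when returning it.
	fmt.Println(result.ErrorOrNil() == nil) // true

	result = multierror.Append(result, errors.New("a"), errors.New("b"))
	result.ErrorFormat = commaFormat
	fmt.Println(result) // 2 errors: a, b
}
```
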
+func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore new file mode 100644 index 00000000000..c92c4d56084 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.gitignore @@ -0,0 +1,29 @@ +#### joe made this: http://goel.io/joe + +#####=== Go ===##### + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml new file mode 100644 index 00000000000..43eb762fa34 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.10.x +install: + - go get -v github.com/golang/lint/golint + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get -d -t -v ./... + - go build -v ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... + - go test -v -covermode=count -coverprofile=cov.out + - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md new file mode 100644 index 00000000000..95581c78b06 --- /dev/null +++ b/vendor/github.com/oklog/ulid/AUTHORS.md @@ -0,0 +1,2 @@ +- Peter Bourgon (@peterbourgon) +- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md new file mode 100644 index 00000000000..8da38c6b00d --- /dev/null +++ b/vendor/github.com/oklog/ulid/CHANGELOG.md @@ -0,0 +1,33 @@ +## 1.3.1 / 2018-10-02 + +* Use underlying entropy source for random increments in Monotonic (#32) + +## 1.3.0 / 2018-09-29 + +* Monotonic entropy support (#31) + +## 1.2.0 / 2018-09-09 + +* Add a function to convert Unix time in milliseconds back to time.Time (#30) + +## 1.1.0 / 2018-08-15 + +* Ensure random part is always read from the entropy reader in full (#28) + +## 1.0.0 / 2018-07-29 + +* Add ParseStrict and MustParseStrict functions (#26) +* Enforce overflow checking when parsing (#20) + +## 0.3.0 / 2017-01-03 + +* Implement ULID.Compare method + +## 0.2.0 / 2016-12-13 + +* Remove year 2262 Timestamp bug. (#1) +* Gracefully handle invalid encodings when parsing. + +## 0.1.0 / 2016-12-06 + +* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md new file mode 100644 index 00000000000..68f03f26eba --- /dev/null +++ b/vendor/github.com/oklog/ulid/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +We use GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. 
+ +* If you plan to do something more involved, first propose your ideas + in a Github issue. This will avoid unnecessary work and surely give + you and us a good deal of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock new file mode 100644 index 00000000000..349b449a6ea --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.lock @@ -0,0 +1,15 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/pborman/getopt" + packages = ["v2"] + revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml new file mode 100644 index 00000000000..624a7a019c7 --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.toml @@ -0,0 +1,26 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/oklog/ulid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md
new file mode 100644
index 00000000000..0a3d2f82b25
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/README.md
@@ -0,0 +1,150 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg)
+[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid)
+[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid)
+[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master)
+[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid)
+[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use cases because:
+
+- It isn't the most character-efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness, which can cause fragmentation in many data structures
+
+A ULID however:
+
+- Is compatible with UUIDs/GUIDs
+- Offers 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Is lexicographically sortable
+- Is canonically encoded as a 26-character string, as opposed to the 36-character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Is case insensitive
+- Contains no special characters (URL safe)
+- Has a monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+```shell
+go get github.com/oklog/ulid
+```
+
+## Usage
+
+An ULID is constructed with a `time.Time` and an `io.Reader` entropy source.
+This design allows for greater flexibility in choosing your trade-offs.
+
+Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use.
+Instantiate one per long-lived goroutine, or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source`, as has frequently been observed with the package-level functions.
+
+```go
+func ExampleULID() {
+	t := time.Unix(1000000, 0)
+	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
+	fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
+	// Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
+}
+```
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX time in milliseconds
+- Won't run out of space until the year 10895 AD
+
+**Entropy**
+- 80 bits
+- User-defined entropy source
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+
+```
+0123456789ABCDEFGHJKMNPQRSTVWXYZ
+```
+
+### Binary Layout and Byte Order
+
+The components are encoded as 16 octets.
Each component is encoded with the Most Significant Byte first (network byte order). + +``` +0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_time_high | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 16_bit_uint_time_low | 16_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +``` + +### String Representation + +``` + 01AN4Z07BY 79KA1307SR9X4MV3 +|----------| |----------------| + Timestamp Entropy + 10 chars 16 chars + 48bits 80bits + base32 base32 +``` + +## Test + +```shell +go test ./... +``` + +## Benchmarks + +On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 + +``` +BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op +BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op +BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op +BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op +BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op +BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op +BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op +BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op +BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op +BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op +BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op +``` + +## Prior Art + +- [alizain/ulid](https://github.com/alizain/ulid) +- [RobThree/NUlid](https://github.com/RobThree/NUlid) +- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go new file mode 100644 index 00000000000..c5d0d66fd2a --- /dev/null +++ b/vendor/github.com/oklog/ulid/ulid.go @@ -0,0 +1,614 @@ +// Copyright 2016 The Oklog Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ulid + +import ( + "bufio" + "bytes" + "database/sql/driver" + "encoding/binary" + "errors" + "io" + "math" + "math/bits" + "math/rand" + "time" +) + +/* +An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier + + The components are encoded as 16 octets. + Each component is encoded with the MSB first (network byte order). + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 16_bit_uint_time_low | 16_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ +type ULID [16]byte + +var ( + // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong + // data size. + ErrDataSize = errors.New("ulid: bad data size when unmarshaling") + + // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with + // invalid Base32 encodings. + ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") + + // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient + // size. + ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") + + // ErrBigTime is returned when constructing an ULID with a time that is larger + // than MaxTime. + ErrBigTime = errors.New("ulid: time too big") + + // ErrOverflow is returned when unmarshaling a ULID whose first character is + // larger than 7, thereby exceeding the valid bit depth of 128. + ErrOverflow = errors.New("ulid: overflow when unmarshaling") + + // ErrMonotonicOverflow is returned by a Monotonic entropy source when + // incrementing the previous ULID's entropy bytes would result in overflow. + ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") + + // ErrScanValue is returned when the value passed to scan cannot be unmarshaled + // into the ULID. + ErrScanValue = errors.New("ulid: source value must be a string or byte slice") +) + +// New returns an ULID with the given Unix milliseconds timestamp and an +// optional entropy source. Use the Timestamp function to convert +// a time.Time to Unix milliseconds. +// +// ErrBigTime is returned when passing a timestamp bigger than MaxTime. +// Reading from the entropy source may also return an error. +func New(ms uint64, entropy io.Reader) (id ULID, err error) { + if err = id.SetTime(ms); err != nil { + return id, err + } + + switch e := entropy.(type) { + case nil: + return id, err + case *monotonic: + err = e.MonotonicRead(ms, id[6:]) + default: + _, err = io.ReadFull(e, id[6:]) + } + + return id, err +} + +// MustNew is a convenience function equivalent to New that panics on failure +// instead of returning an error. +func MustNew(ms uint64, entropy io.Reader) ULID { + id, err := New(ms, entropy) + if err != nil { + panic(err) + } + return id +} + +// Parse parses an encoded ULID, returning an error in case of failure. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. For a version that +// returns an error instead, see ParseStrict. 
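
For context, constructing an ID with `New` typically looks like the sketch below; crypto/rand is one reasonable entropy source (unpredictable, but slower than math/rand):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	id, err := ulid.New(ulid.Timestamp(time.Now()), rand.Reader)
	if err != nil {
		// ErrBigTime for timestamps beyond MaxTime, or an entropy read error.
		panic(err)
	}
	fmt.Println(id) // a 26-character Crockford base32 string
}
```
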
+func Parse(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), false, &id) +} + +// ParseStrict parses an encoded ULID, returning an error in case of failure. +// +// It is like Parse, but additionally validates that the parsed ULID consists +// only of valid base32 characters. It is slightly slower than Parse. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings return ErrInvalidCharacters. +func ParseStrict(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), true, &id) +} + +func parse(v []byte, strict bool, id *ULID) error { + // Check if a base32 encoded ULID is the right length. + if len(v) != EncodedSize { + return ErrDataSize + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if strict && + (dec[v[0]] == 0xFF || + dec[v[1]] == 0xFF || + dec[v[2]] == 0xFF || + dec[v[3]] == 0xFF || + dec[v[4]] == 0xFF || + dec[v[5]] == 0xFF || + dec[v[6]] == 0xFF || + dec[v[7]] == 0xFF || + dec[v[8]] == 0xFF || + dec[v[9]] == 0xFF || + dec[v[10]] == 0xFF || + dec[v[11]] == 0xFF || + dec[v[12]] == 0xFF || + dec[v[13]] == 0xFF || + dec[v[14]] == 0xFF || + dec[v[15]] == 0xFF || + dec[v[16]] == 0xFF || + dec[v[17]] == 0xFF || + dec[v[18]] == 0xFF || + dec[v[19]] == 0xFF || + dec[v[20]] == 0xFF || + dec[v[21]] == 0xFF || + dec[v[22]] == 0xFF || + dec[v[23]] == 0xFF || + dec[v[24]] == 0xFF || + dec[v[25]] == 0xFF) { + return ErrInvalidCharacters + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if v[0] > '7' { + return ErrOverflow + } + + // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) + // to decode a base32 ULID. + + // 6 bytes timestamp (48 bits) + (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) + (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) + (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) + (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) + (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) + (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) + + // 10 bytes of entropy (80 bits) + (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) + (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) + (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) + (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) + (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) + (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) + (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) + (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) + (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) + (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) + + return nil +} + +// MustParse is a convenience function equivalent to Parse that panics on failure +// instead of returning an error. +func MustParse(ulid string) ULID { + id, err := Parse(ulid) + if err != nil { + panic(err) + } + return id +} + +// MustParseStrict is a convenience function equivalent to ParseStrict that +// panics on failure instead of returning an error. +func MustParseStrict(ulid string) ULID { + id, err := ParseStrict(ulid) + if err != nil { + panic(err) + } + return id +} + +// String returns a lexicographically sortable string encoded ULID +// (26 characters, non-standard base 32) e.g. 
01AN4Z07BY79KA1307SR9X4MV3 +// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy +func (id ULID) String() string { + ulid := make([]byte, EncodedSize) + _ = id.MarshalTextTo(ulid) + return string(ulid) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface by +// returning the ULID as a byte slice. +func (id ULID) MarshalBinary() ([]byte, error) { + ulid := make([]byte, len(id)) + return ulid, id.MarshalBinaryTo(ulid) +} + +// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. +// ErrBufferSize is returned when the len(dst) != 16. +func (id ULID) MarshalBinaryTo(dst []byte) error { + if len(dst) != len(id) { + return ErrBufferSize + } + + copy(dst, id[:]) + return nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by +// copying the passed data and converting it to an ULID. ErrDataSize is +// returned if the data length is different from ULID length. +func (id *ULID) UnmarshalBinary(data []byte) error { + if len(data) != len(*id) { + return ErrDataSize + } + + copy((*id)[:], data) + return nil +} + +// Encoding is the base 32 encoding alphabet used in ULID strings. +const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" + +// MarshalText implements the encoding.TextMarshaler interface by +// returning the string encoded ULID. +func (id ULID) MarshalText() ([]byte, error) { + ulid := make([]byte, EncodedSize) + return ulid, id.MarshalTextTo(ulid) +} + +// MarshalTextTo writes the ULID as a string to the given buffer. +// ErrBufferSize is returned when the len(dst) != 26. +func (id ULID) MarshalTextTo(dst []byte) error { + // Optimized unrolled loop ahead. + // From https://github.com/RobThree/NUlid + + if len(dst) != EncodedSize { + return ErrBufferSize + } + + // 10 byte timestamp + dst[0] = Encoding[(id[0]&224)>>5] + dst[1] = Encoding[id[0]&31] + dst[2] = Encoding[(id[1]&248)>>3] + dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] + dst[4] = Encoding[(id[2]&62)>>1] + dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] + dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] + dst[7] = Encoding[(id[4]&124)>>2] + dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] + dst[9] = Encoding[id[5]&31] + + // 16 bytes of entropy + dst[10] = Encoding[(id[6]&248)>>3] + dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] + dst[12] = Encoding[(id[7]&62)>>1] + dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] + dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] + dst[15] = Encoding[(id[9]&124)>>2] + dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] + dst[17] = Encoding[id[10]&31] + dst[18] = Encoding[(id[11]&248)>>3] + dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] + dst[20] = Encoding[(id[12]&62)>>1] + dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] + dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] + dst[23] = Encoding[(id[14]&124)>>2] + dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] + dst[25] = Encoding[id[15]&31] + + return nil +} + +// Byte to index table for O(1) lookups when unmarshaling. +// We use 0xFF as sentinel value for invalid indexes. 
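
Because ULID is a comparable [16]byte array, round trips through the marshal methods above (and UnmarshalText just below) are easy to verify; a sketch:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	id := ulid.MustNew(ulid.Timestamp(time.Now()), entropy)

	// Text round trip: the 26-byte base32 form.
	txt, _ := id.MarshalText()
	var fromText ulid.ULID
	if err := fromText.UnmarshalText(txt); err != nil {
		panic(err)
	}

	// Binary round trip: the raw 16 bytes.
	bin, _ := id.MarshalBinary()
	var fromBin ulid.ULID
	if err := fromBin.UnmarshalBinary(bin); err != nil {
		panic(err)
	}

	fmt.Println(id == fromText, id == fromBin) // true true
}
```
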
+var dec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const EncodedSize = 26 + +// UnmarshalText implements the encoding.TextUnmarshaler interface by +// parsing the data as string encoded ULID. +// +// ErrDataSize is returned if the len(v) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. +func (id *ULID) UnmarshalText(v []byte) error { + return parse(v, false, id) +} + +// Time returns the Unix time in milliseconds encoded in the ULID. +// Use the top level Time function to convert the returned value to +// a time.Time. +func (id ULID) Time() uint64 { + return uint64(id[5]) | uint64(id[4])<<8 | + uint64(id[3])<<16 | uint64(id[2])<<24 | + uint64(id[1])<<32 | uint64(id[0])<<40 +} + +// maxTime is the maximum Unix time in milliseconds that can be +// represented in an ULID. +var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() + +// MaxTime returns the maximum Unix time in milliseconds that +// can be encoded in an ULID. +func MaxTime() uint64 { return maxTime } + +// Now is a convenience function that returns the current +// UTC time in Unix milliseconds. Equivalent to: +// Timestamp(time.Now().UTC()) +func Now() uint64 { return Timestamp(time.Now().UTC()) } + +// Timestamp converts a time.Time to Unix milliseconds. +// +// Because of the way ULID stores time, times from the year +// 10889 produces undefined results. +func Timestamp(t time.Time) uint64 { + return uint64(t.Unix())*1000 + + uint64(t.Nanosecond()/int(time.Millisecond)) +} + +// Time converts Unix milliseconds in the format +// returned by the Timestamp function to a time.Time. +func Time(ms uint64) time.Time { + s := int64(ms / 1e3) + ns := int64((ms % 1e3) * 1e6) + return time.Unix(s, ns) +} + +// SetTime sets the time component of the ULID to the given Unix time +// in milliseconds. 
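
The Timestamp/Time pair above converts in both directions; combined with SetTime (next), the 48-bit time component can be written and read back losslessly, as in this sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	t := time.Date(2020, 3, 6, 13, 46, 25, 0, time.UTC)
	ms := ulid.Timestamp(t) // Unix milliseconds; only 48 bits are used

	var id ulid.ULID
	if err := id.SetTime(ms); err != nil {
		panic(err) // ErrBigTime for values above MaxTime()
	}

	fmt.Println(id.Time() == ms)            // true
	fmt.Println(ulid.Time(id.Time()).UTC()) // 2020-03-06 13:46:25 +0000 UTC
}
```
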
+func (id *ULID) SetTime(ms uint64) error { + if ms > maxTime { + return ErrBigTime + } + + (*id)[0] = byte(ms >> 40) + (*id)[1] = byte(ms >> 32) + (*id)[2] = byte(ms >> 24) + (*id)[3] = byte(ms >> 16) + (*id)[4] = byte(ms >> 8) + (*id)[5] = byte(ms) + + return nil +} + +// Entropy returns the entropy from the ULID. +func (id ULID) Entropy() []byte { + e := make([]byte, 10) + copy(e, id[6:]) + return e +} + +// SetEntropy sets the ULID entropy to the passed byte slice. +// ErrDataSize is returned if len(e) != 10. +func (id *ULID) SetEntropy(e []byte) error { + if len(e) != 10 { + return ErrDataSize + } + + copy((*id)[6:], e) + return nil +} + +// Compare returns an integer comparing id and other lexicographically. +// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. +func (id ULID) Compare(other ULID) int { + return bytes.Compare(id[:], other[:]) +} + +// Scan implements the sql.Scanner interface. It supports scanning +// a string or byte slice. +func (id *ULID) Scan(src interface{}) error { + switch x := src.(type) { + case nil: + return nil + case string: + return id.UnmarshalText([]byte(x)) + case []byte: + return id.UnmarshalBinary(x) + } + + return ErrScanValue +} + +// Value implements the sql/driver.Valuer interface. This returns the value +// represented as a byte slice. If instead a string is desirable, a wrapper +// type can be created that calls String(). +// +// // stringValuer wraps a ULID as a string-based driver.Valuer. +// type stringValuer ULID +// +// func (id stringValuer) Value() (driver.Value, error) { +// return ULID(id).String(), nil +// } +// +// // Example usage. +// db.Exec("...", stringValuer(id)) +func (id ULID) Value() (driver.Value, error) { + return id.MarshalBinary() +} + +// Monotonic returns an entropy source that is guaranteed to yield +// strictly increasing entropy bytes for the same ULID timestamp. +// On conflicts, the previous ULID entropy is incremented with a +// random number between 1 and `inc` (inclusive). +// +// The provided entropy source must actually yield random bytes or else +// monotonic reads are not guaranteed to terminate, since there isn't +// enough randomness to compute an increment number. +// +// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. +// The lower the value of `inc`, the easier the next ULID within the +// same millisecond is to guess. If your code depends on ULIDs having +// secure entropy bytes, then don't go under this default unless you know +// what you're doing. +// +// The returned io.Reader isn't safe for concurrent use. +func Monotonic(entropy io.Reader, inc uint64) io.Reader { + m := monotonic{ + Reader: bufio.NewReader(entropy), + inc: inc, + } + + if m.inc == 0 { + m.inc = math.MaxUint32 + } + + if rng, ok := entropy.(*rand.Rand); ok { + m.rng = rng + } + + return &m +} + +type monotonic struct { + io.Reader + ms uint64 + inc uint64 + entropy uint80 + rand [8]byte + rng *rand.Rand +} + +func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { + if !m.entropy.IsZero() && m.ms == ms { + err = m.increment() + m.entropy.AppendTo(entropy) + } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { + m.ms = ms + m.entropy.SetBytes(entropy) + } + return err +} + +// increment the previous entropy number with a random number +// of up to m.inc (inclusive). 
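
A sketch of the monotonic behavior documented above: two ULIDs drawn from the same `Monotonic` source within one millisecond are strictly ordered:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/oklog/ulid"
)

func main() {
	// inc == 0 selects the secure default increment of math.MaxUint32.
	// The returned reader is not safe for concurrent use.
	entropy := ulid.Monotonic(rand.New(rand.NewSource(42)), 0)

	ms := ulid.Now()
	a := ulid.MustNew(ms, entropy)
	b := ulid.MustNew(ms, entropy) // same millisecond: entropy is incremented

	fmt.Println(a.Compare(b) < 0) // true: a sorts strictly before b
}
```
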
+func (m *monotonic) increment() error {
+	if inc, err := m.random(); err != nil {
+		return err
+	} else if m.entropy.Add(inc) {
+		return ErrMonotonicOverflow
+	}
+	return nil
+}
+
+// random returns a uniform random value in [1, m.inc), reading entropy
+// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1.
+// Adapted from: https://golang.org/pkg/crypto/rand/#Int
+func (m *monotonic) random() (inc uint64, err error) {
+	if m.inc <= 1 {
+		return 1, nil
+	}
+
+	// Fast path for using an underlying rand.Rand directly.
+	if m.rng != nil {
+		// Range: [1, m.inc)
+		return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil
+	}
+
+	// bitLen is the maximum bit length needed to encode a value < m.inc.
+	bitLen := bits.Len64(m.inc)
+
+	// byteLen is the maximum byte length needed to encode a value < m.inc.
+	byteLen := uint(bitLen+7) / 8
+
+	// msbitLen is the number of bits in the most significant byte of m.inc-1.
+	msbitLen := uint(bitLen % 8)
+	if msbitLen == 0 {
+		msbitLen = 8
+	}
+
+	for inc == 0 || inc >= m.inc {
+		if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil {
+			return 0, err
+		}
+
+		// Clear bits in the first byte to increase the probability
+		// that the candidate is < m.inc.
+		m.rand[0] &= uint8(int(1<<msbitLen) - 1)
+// Format specifiers are of the form `%{<field name>[:<verb>]}`.
+// The field name is mandatory. If no <verb> is given, then the value
+// will be printed using `v` as verb. The prefix modifiers '+', '#', and
+// '@' (= alias for '#') change how the argument will be printed, similar to
+// normal verb flags.
+// More complex formatting directives can be configured after the `:`; the
+// <verb> uses the same syntax as the standard fmt verbs.
+//
+// For example:
+//
+//	Printf(cb, "hello %v", "world")
+//
+// will just print hello world. But:
+//
+//	Printf(cb, "hello %{who}", "world")
+//
+// will call your callback like this: `cb("who", 0, "world")`
+//
+// We can print a padded integer with a text width of 5 digits like this:
+//
+//	Printf(cb, "%{value:05d}", 23)
+//
+// This will print '00023' thanks to the '0' flag and call the provided callback.
+//
+// Named and anonymous formatting directives can be freely mixed. The callback will only
+// be called if a named field or error value is encountered.
+//
+// The printf-style functions in ctxfmt all respect the fmt.Stringer,
+// fmt.GoStringer, and fmt.Formatter interfaces.
+package ctxfmt
+
+import (
+	"io"
+	"os"
+	"strings"
+)
+
+type CB func(key string, idx int, val interface{})
+
+// Printf formats according to the format specifier and writes to stdout.
+// It returns the unprocessed arguments.
+func Printf(cb CB, msg string, vs ...interface{}) (rest []interface{}, n int, err error) {
+	return Fprintf(os.Stdout, cb, msg, vs...)
+}
+
+// Sprintf formats according to the format specifier and returns the resulting
+// string and the list of unprocessed arguments.
+func Sprintf(cb CB, msg string, vs ...interface{}) (string, []interface{}) {
+	var buf strings.Builder
+	rest, _, _ := Fprintf(&buf, cb, msg, vs...)
+	return buf.String(), rest
+}
+
+// Fprintf formats according to the format specifier and writes to w.
+// It returns the unprocessed arguments.
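
Putting `Sprintf` and the callback together; a sketch that assumes the `%{<field name>[:<verb>]}` syntax described in the package comment above:

```go
package main

import (
	"fmt"

	"github.com/urso/diag/ctxfmt"
)

func main() {
	// The callback receives every named field (and error value) that
	// ctxfmt encounters while formatting.
	fields := map[string]interface{}{}
	cb := func(key string, idx int, val interface{}) {
		fields[key] = val
	}

	msg, rest := ctxfmt.Sprintf(cb, "open %{path} failed after %{attempts:d} tries", "/etc/passwd", 3)
	fmt.Println(msg)    // open /etc/passwd failed after 3 tries
	fmt.Println(fields) // map[attempts:3 path:/etc/passwd]
	fmt.Println(rest)   // [] (no unprocessed arguments)
}
```
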
+func Fprintf(w io.Writer, cb CB, msg string, vs ...interface{}) (rest []interface{}, n int, err error) { + printer := &printer{To: w} + in := &interpreter{ + cb: cb, + p: printer, + args: argstate{args: vs}, + } + parser := &parser{handler: in} + parser.parse(msg) + + used := in.args.idx + if used >= len(vs) { + return nil, printer.written, printer.err + } + + // collect errors from extra variables + rest = vs[used:] + for i := range rest { + if isErrorValue(rest[i]) { + cb("", used+i, rest[i]) + } + } + return rest, printer.written, printer.err +} diff --git a/vendor/github.com/urso/diag/ctxfmt/error.go b/vendor/github.com/urso/diag/ctxfmt/error.go new file mode 100644 index 00000000000..d39128def18 --- /dev/null +++ b/vendor/github.com/urso/diag/ctxfmt/error.go @@ -0,0 +1,17 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package ctxfmt + +import "errors" + +var ( + errInvalidVerb = errors.New("invalid verb") + errNoVerb = errors.New("no verb") + errCloseMissing = errors.New("missing '}'") + errNoFieldName = errors.New("field name missing") + errMissingArg = errors.New("missing arg") +) diff --git a/vendor/github.com/urso/diag/ctxfmt/interpret.go b/vendor/github.com/urso/diag/ctxfmt/interpret.go new file mode 100644 index 00000000000..6d58f5144e4 --- /dev/null +++ b/vendor/github.com/urso/diag/ctxfmt/interpret.go @@ -0,0 +1,1146 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
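
The tail of `Fprintf` above returns unconsumed arguments and still reports trailing error values to the callback with an empty key; a sketch of that behavior:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/urso/diag/ctxfmt"
)

func main() {
	type field struct {
		key string
		idx int
		val interface{}
	}
	var seen []field
	cb := func(key string, idx int, val interface{}) {
		seen = append(seen, field{key, idx, val})
	}

	// %{count} consumes the 42; the trailing error is returned in rest
	// and also handed to the callback via the loop over rest above.
	rest, _, _ := ctxfmt.Fprintf(os.Stdout, cb, "count=%{count}\n", 42, errors.New("boom"))

	fmt.Println(len(rest)) // 1
	fmt.Println(seen)      // [{count 0 42} { 1 boom}]
}
```
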
+ +package ctxfmt + +import ( + "fmt" + "reflect" + "strconv" + "unicode/utf8" +) + +type interpreter struct { + p *printer + args argstate + st state + cb CB + + fmtBuf [128]byte +} + +type state struct { + inError bool + inPanic bool + arg interface{} + val reflect.Value +} + +type formatterState struct { + *printer + tok *formatToken +} + +const lHexDigits = "0123456789abcdefx" +const uHexDigits = "0123456789ABCDEFX" + +func (in *interpreter) onString(s string) { + in.p.WriteString(s) +} + +func (in *interpreter) onToken(tok formatToken) { + arg, argIdx, exists := in.args.next() + if !exists { + in.formatErr(&tok, exists, arg, errMissingArg) + return + } + + if tok.flags.named || isErrorValue(arg) || isFieldValue(arg) { + in.cb(tok.field, argIdx, arg) + } + + in.formatArg(&tok, arg) +} + +func (in *interpreter) onParseError(tok formatToken, err error) { + arg, _, has := in.args.next() + in.formatErr(&tok, has, arg, err) +} + +func (in *interpreter) formatErr(tok *formatToken, hasArg bool, arg interface{}, err error) { + switch err { + case errInvalidVerb: + in.p.WriteString("%!") + in.p.WriteRune(tok.verb) + in.p.WriteString("(INVALID)") + if hasArg { + in.formatErrArg(tok, arg) + } + case errNoVerb: + in.p.WriteString("%!(NOVERB)") + if hasArg { + in.formatErrArg(tok, arg) + } + case errCloseMissing: + in.p.WriteString("%!(MISSING })") + case errNoFieldName: + in.p.WriteString("%!(NO FIELD)") + case errMissingArg: + in.p.WriteString("%!") + in.p.WriteRune(tok.verb) + in.p.WriteString("(MISSING)") + } +} + +func (in *interpreter) formatErrArg(tok *formatToken, arg interface{}) { + if arg == nil { + in.p.WriteString("()") + return + } + + in.p.WriteByte('(') + in.p.WriteString(reflect.TypeOf(arg).String()) + in.p.WriteByte('=') + tmpTok := *tok + tmpTok.verb = 'v' + in.formatArg(&tmpTok, arg) + in.p.WriteByte(')') +} + +func (in *interpreter) formatArg(tok *formatToken, arg interface{}) { + in.st.arg = arg + in.st.val = reflect.Value{} + verb := tok.verb + + if arg == nil { + switch verb { + case 'T', 'v': + in.formatPadString(tok, "") + default: + in.formatBadVerb(tok) + } + return + } + + // Type and pointer are special. 
Let's treat them first + switch verb { + case 'T': + in.fmtStr(tok, reflect.TypeOf(arg).String()) + return + case 'p': + in.fmtPointer(tok, reflect.ValueOf(arg)) + return + } + + // try to print primitive types without reflection + switch value := arg.(type) { + case bool: + in.fmtBool(tok, value) + case int: + in.fmtInt(tok, uint64(value), true) + case int8: + in.fmtInt(tok, uint64(value), true) + case int16: + in.fmtInt(tok, uint64(value), true) + case int32: + in.fmtInt(tok, uint64(value), true) + case int64: + in.fmtInt(tok, uint64(value), true) + case uint: + in.fmtInt(tok, uint64(value), false) + case uint8: + in.fmtInt(tok, uint64(value), false) + case uint16: + in.fmtInt(tok, uint64(value), false) + case uint32: + in.fmtInt(tok, uint64(value), false) + case uint64: + in.fmtInt(tok, value, false) + case uintptr: + in.fmtInt(tok, uint64(value), false) + case string: + in.fmtString(tok, value) + case []byte: + in.fmtBytes(tok, "[]byte", value) + case float32: + in.fmtFloat(tok, float64(value), 32) + case float64: + in.fmtFloat(tok, value, 64) + case complex64: + in.fmtComplex(tok, complex128(value), 64) + case complex128: + in.fmtComplex(tok, value, 128) + case reflect.Value: + in.fmtValue(tok, value, 0) + + default: + in.fmtValue(tok, reflect.ValueOf(arg), 0) + } +} + +func (in *interpreter) fmtValue(tok *formatToken, v reflect.Value, depth int) { + if in.handleMethods(tok, v) { + return + } + + in.st.arg = nil + in.st.val = v + + verb, flags := tok.verb, &tok.flags + + switch v.Kind() { + case reflect.Invalid: + switch { + case depth == 0: + in.p.WriteString("") + case verb == 'v': + in.p.WriteString("") + default: + in.formatBadVerb(tok) + } + + case reflect.Bool: + in.fmtBool(tok, v.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + in.fmtInt(tok, uint64(v.Int()), true) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + in.fmtInt(tok, v.Uint(), false) + case reflect.Float32: + in.fmtFloat(tok, v.Float(), 32) + case reflect.Float64: + in.fmtFloat(tok, v.Float(), 64) + case reflect.Complex64: + in.fmtComplex(tok, v.Complex(), 64) + case reflect.Complex128: + in.fmtComplex(tok, v.Complex(), 128) + case reflect.String: + in.fmtString(tok, v.String()) + case reflect.Ptr: + if depth == 0 && v.Pointer() != 0 { + elem := v.Elem() + switch elem.Kind() { + case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice: + in.p.WriteByte('&') + in.fmtValue(tok, elem, depth+1) + return + } + } + fallthrough + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + in.fmtPointer(tok, v) + + case reflect.Interface: + switch elem := v.Elem(); { + case elem.IsValid(): + in.fmtValue(tok, elem, depth+1) + case flags.sharpV: + in.p.WriteString(v.Type().String()) + in.p.WriteString("(nil)") + default: + in.p.WriteString("") + } + + case reflect.Map: + if flags.sharpV { + in.p.WriteString(v.Type().String()) + if v.IsNil() { + in.p.WriteString("(nil)") + return + } + in.p.WriteByte('{') + } else { + in.p.WriteString("map[") + } + + for iter, i := newMapIter(v), 0; iter.Next(); i++ { + if i > 0 { + if flags.sharpV { + in.p.WriteString(", ") + } else { + in.p.WriteByte(' ') + } + } + + key := iter.Key() + val := iter.Value() + in.fmtValue(tok, key, depth+1) + in.p.WriteByte(':') + in.fmtValue(tok, val, depth+1) + } + + if flags.sharpV { + in.p.WriteByte('}') + } else { + in.p.WriteByte(']') + } + + case reflect.Struct: + if flags.sharpV { + in.p.WriteString(v.Type().String()) + } + in.p.WriteByte('{') + for i := 0; 
i < v.NumField(); i++ {
+			if i > 0 {
+				if flags.sharpV {
+					in.p.WriteString(", ")
+				} else {
+					in.p.WriteByte(' ')
+				}
+			}
+
+			if flags.plusV || flags.sharpV {
+				if name := v.Type().Field(i).Name; name != "" {
+					in.p.WriteString(name)
+					in.p.WriteByte(':')
+				}
+			}
+
+			fld := v.Field(i)
+			if fld.Kind() == reflect.Interface && !fld.IsNil() {
+				fld = fld.Elem()
+			}
+			in.fmtValue(tok, fld, depth+1)
+		}
+		in.p.WriteByte('}')
+
+	case reflect.Array, reflect.Slice:
+		// handle variants of []byte
+		switch verb {
+		case 's', 'q', 'x', 'X':
+			t := v.Type()
+			if t.Elem().Kind() == reflect.Uint8 {
+				var bytes []byte
+				if v.Kind() == reflect.Slice {
+					bytes = v.Bytes()
+				} else if v.CanAddr() {
+					bytes = v.Slice(0, v.Len()).Bytes()
+				} else {
+					// Copy original bytes into a temporary buffer.
+					// TODO: can we read the original bytes via reflection/pointers?
+					bytes = make([]byte, v.Len())
+					for i := range bytes {
+						bytes[i] = byte(v.Index(i).Uint())
+					}
+				}
+				in.fmtBytes(tok, t.String(), bytes)
+				return
+			}
+		}
+
+		if flags.sharpV {
+			in.p.WriteString(v.Type().String())
+			if v.Kind() == reflect.Slice && v.IsNil() {
+				in.p.WriteString("(nil)")
+				return
+			}
+
+			in.p.WriteByte('{')
+			for i := 0; i < v.Len(); i++ {
+				if i > 0 {
+					in.p.WriteString(", ")
+				}
+				in.fmtValue(tok, v.Index(i), depth+1)
+			}
+			in.p.WriteByte('}')
+			return
+		}
+
+		in.p.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				in.p.WriteByte(' ')
+			}
+			in.fmtValue(tok, v.Index(i), depth+1)
+		}
+		in.p.WriteByte(']')
+	}
+}
+
+func (in *interpreter) handleMethods(tok *formatToken, v reflect.Value) bool {
+	flags := &tok.flags
+
+	if in.st.inError || !v.IsValid() || !v.CanInterface() {
+		return false
+	}
+
+	arg := v.Interface()
+	if formatter, ok := arg.(fmt.Formatter); ok {
+		defer in.recoverPanic(tok, arg)
+		formatter.Format(&formatterState{in.p, tok}, rune(tok.verb))
+		return true
+	}
+
+	if flags.sharpV {
+		stringer, ok := arg.(fmt.GoStringer)
+		if ok {
+			defer in.recoverPanic(tok, arg)
+			in.fmtStr(tok, stringer.GoString())
+		}
+		return ok
+	}
+
+	switch tok.verb {
+	case 'v', 's', 'x', 'X', 'q':
+		break
+	default:
+		return false
+	}
+
+	switch v := arg.(type) {
+	case error:
+		defer in.recoverPanic(tok, arg)
+		in.fmtString(tok, v.Error())
+	case fmt.Stringer:
+		defer in.recoverPanic(tok, arg)
+		in.fmtString(tok, v.String())
+	default:
+		return false
+	}
+	return true
+}
+
+func (in *interpreter) recoverPanic(tok *formatToken, arg interface{}) {
+	p := in.p
+
+	if err := recover(); err != nil {
+		if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
+			p.WriteString("<nil>")
+			return
+		}
+		if in.st.inPanic {
+			// nested recursive panic.
+ panic(err) + } + + errTok := *tok + errTok.verb = 'v' + errTok.flags = flags{} + + p.WriteString("%!") + p.WriteRune(tok.verb) + p.WriteString("(PANIC=") + in.st.inPanic = true + in.formatArg(&errTok, err) + in.st.inPanic = false + p.WriteByte(')') + } +} + +func (in *interpreter) fmtBool(tok *formatToken, b bool) { + switch tok.verb { + case 't', 'v': + if b { + in.formatPadString(tok, "true") + } else { + in.formatPadString(tok, "false") + } + default: + in.formatBadVerb(tok) + } +} + +func (in *interpreter) fmtInt(tok *formatToken, v uint64, signed bool) { + flags := &tok.flags + verb := tok.verb + + switch verb { + case 'v': + if flags.sharpV && !signed { + in.fmtHex64(tok, v, true) + } else { + in.fmtIntBase(tok, v, 10, signed, lHexDigits) + } + case 'd': + in.fmtIntBase(tok, v, 10, signed, lHexDigits) + case 'b': + in.fmtIntBase(tok, v, 2, signed, lHexDigits) + case 'o', 'O': + in.fmtIntBase(tok, v, 8, signed, lHexDigits) + case 'x': + in.fmtIntBase(tok, v, 16, signed, lHexDigits) + case 'X': + in.fmtIntBase(tok, v, 16, signed, uHexDigits) + case 'c': + in.fmtRune(tok, v) + case 'q': + if v <= utf8.MaxRune { + in.fmtRuneQ(tok, v) + } else { + in.formatBadVerb(tok) + } + case 'U': + in.fmtUnicode(tok, v) + default: + in.formatBadVerb(tok) + } +} + +func (in *interpreter) fmtRune(tok *formatToken, v uint64) { + n := utf8.EncodeRune(in.fmtBuf[:], convRune(v)) + in.formatPad(tok, in.fmtBuf[:n]) +} + +func (in *interpreter) fmtRuneQ(tok *formatToken, v uint64) { + r := convRune(v) + buf := in.fmtBuf[:0] + if tok.flags.plus { + buf = strconv.AppendQuoteRuneToASCII(buf, r) + } else { + buf = strconv.AppendQuoteRune(buf, r) + } + in.formatPad(tok, buf) +} + +func (in *interpreter) fmtUnicode(tok *formatToken, u uint64) { + precision := 4 + + // prepare temporary format buffer + buf := in.fmtBuf[0:] + if tok.flags.hasPrecision && tok.precision > precision { + precision = tok.precision + width := 2 + precision + 2 + utf8.UTFMax + 1 + if width > len(buf) { + buf = make([]byte, width) + } + } + + // format from right to left + i := len(buf) + + // print rune with quote if '#' is set and rune is quoted + if tok.flags.sharp && u < utf8.MaxRune && strconv.IsPrint(rune(u)) { + i-- + buf[i] = '\'' + + i -= utf8.RuneLen(rune(u)) + utf8.EncodeRune(buf[i:], rune(u)) + + i -= 2 + copy(buf[i:], " '") + } + + // format hex digits + for u >= 16 { + i-- + buf[i] = uHexDigits[u&0x0F] + precision-- + u >>= 4 + } + i-- + buf[i] = uHexDigits[u] + precision-- + + // left-pad zeroes + for precision > 0 { + i-- + buf[i] = '0' + precision-- + } + + // leading 'U+' + i -= 2 + copy(buf[i:], "U+") + + // ensure we pad with ' ' always: + in.formatPadWidth(tok, buf[i:], ' ') +} + +func (in *interpreter) fmtIntBase(tok *formatToken, v uint64, base int, signed bool, digits string) { + flags := &tok.flags + + neg := signed && int64(v) < 0 + if neg { + v = -v + } + + buf := in.fmtBuf[:] + if flags.hasWidth || flags.hasPrecision { + width := 3 + tok.width + tok.precision + if width > len(buf) { + buf = make([]byte, width) + } + } + + precision := 0 + if flags.hasPrecision { + precision = tok.precision + if precision == 0 && v == 0 { + in.formatPaddingWith(tok.width, ' ') + return + } + } else if flags.zero && flags.hasWidth { + precision = tok.width + if neg || flags.plus || flags.space { + precision-- // reserve space for '-' sign + } + } + + // print right-to-left + i := len(buf) + switch base { + case 10: + for v >= 10 { + next := v / 10 + i-- + buf[i] = byte('0' + v - next*10) + v = next + } + case 16: + for ; v >= 16; 
v >>= 4 { + i-- + buf[i] = digits[v&0xF] + } + case 8: + for ; v >= 8; v >>= 3 { + i-- + buf[i] = byte('0' + v&7) + } + case 2: + for ; v >= 2; v >>= 1 { + i-- + buf[i] = byte('0' + v&1) + } + default: + panic("unknown base") + } + i-- + buf[i] = digits[v] + + // left-pad zeros + for i > 0 && precision > len(buf)-i { + i-- + buf[i] = '0' + } + + // '#' triggers prefix + if flags.sharp { + switch base { + case 2: + i-- + buf[i] = 'b' + i-- + buf[i] = '0' + case 8: + if buf[i] != '0' { + i-- + buf[i] = '0' + } + case 16: + i-- + buf[i] = digits[16] + i-- + buf[i] = '0' + } + } + + if tok.verb == 'O' { + i-- + buf[i] = 'o' + i-- + buf[i] = '0' + } + + // add sign + if neg { + i-- + buf[i] = '-' + } else if flags.plus { + i-- + buf[i] = '+' + } else if flags.space { + i-- + buf[i] = ' ' + } + + in.formatPadWidth(tok, buf[i:], ' ') +} + +func (in *interpreter) fmtString(tok *formatToken, s string) { + switch tok.verb { + case 'v': + if tok.flags.sharpV { + in.fmtQualified(tok, s) + } else { + in.fmtStr(tok, s) + } + case 's': + in.fmtStr(tok, s) + case 'x': + in.fmtStrHex(tok, s, lHexDigits) + case 'X': + in.fmtStrHex(tok, s, uHexDigits) + case 'q': + in.fmtQualified(tok, s) + default: + in.formatBadVerb(tok) + } +} + +func (in *interpreter) fmtHex64(tok *formatToken, v uint64, leading0x bool) { + tokFmt := *tok + tokFmt.flags.sharp = leading0x + in.fmtIntBase(&tokFmt, v, 16, false, lHexDigits) +} + +func (in *interpreter) fmtBytes(tok *formatToken, typeName string, b []byte) { + flags := &tok.flags + + switch tok.verb { + case 'v', 'd': + if flags.sharpV { // print hex dump of '#' is set + in.p.WriteString(typeName) + if b == nil { + in.p.WriteString("(nil)") + return + } + + in.p.WriteByte('{') + for i, c := range b { + if i > 0 { + in.p.WriteString(", ") + } + in.fmtHex64(tok, uint64(c), true) + } + in.p.WriteByte('}') + } else { // print base-10 digits if '#' is not set + in.p.WriteByte('[') + for i, c := range b { + if i > 0 { + in.p.WriteByte(' ') + } + in.fmtIntBase(tok, uint64(c), 10, false, lHexDigits) + } + in.p.WriteByte(']') + } + case 's': + in.fmtStr(tok, unsafeString(b)) + case 'x': + in.fmtBytesHex(tok, b, lHexDigits) + case 'X': + in.fmtBytesHex(tok, b, uHexDigits) + case 'q': + in.fmtQualified(tok, unsafeString(b)) + default: + in.fmtValue(tok, reflect.ValueOf(b), 0) + } +} + +func (in *interpreter) fmtStrHex(tok *formatToken, s string, digits string) { + in.fmtSBHex(tok, s, nil, digits) +} + +func (in *interpreter) fmtBytesHex(tok *formatToken, b []byte, digits string) { + in.fmtSBHex(tok, "", b, digits) +} + +func (in *interpreter) fmtSBHex(tok *formatToken, s string, b []byte, digits string) { + flags := &tok.flags + + N := len(b) + if b == nil { + N = len(s) + } + + if tok.flags.hasPrecision && tok.precision < N { + N = tok.precision + } + + // Compute total width of hex encoded string. Each byte requires 2 symbols. + // Codes are separates by a space character if the 'space' flag is set. + // Leading 0x will be added if the '#' modifier has been used. + width := 2 * N + if width <= 0 { + if flags.hasWidth { + in.formatPadding(tok, tok.width) + } + return + } + if flags.space { + if flags.sharp { + width *= 2 + } + width += N - 1 + } else if flags.sharp { + width += 2 + } + needsPadding := tok.width > width && flags.hasWidth + + // handle left padding if '-' modifier is not set. 
+ if needsPadding && !flags.minus { + in.formatPadding(tok, tok.width-width) + } + + buf := in.fmtBuf[:] + if width >= len(buf) { + buf = make([]byte, width) + } else { + buf = buf[:width] + } + pos := 0 + + // write hex string + if flags.sharp { + buf[pos], buf[pos+1] = '0', digits[16] + pos += 2 + } + for i := 0; i < N; i++ { + if flags.space && i > 0 { + buf[pos] = ' ' + pos++ + if flags.sharp { + buf[pos], buf[pos+1] = '0', digits[16] + pos += 2 + } + } + + var c byte + if b != nil { + c = b[i] + } else { + c = s[i] + } + + buf[pos], buf[pos+1] = digits[c>>4], digits[c&0xf] + pos += 2 + } + in.p.Write(buf) + + // handle right padding if '-' modifier is set. + if needsPadding && flags.minus { + in.formatPadding(tok, tok.width-width) + } +} + +func (in *interpreter) fmtQualified(tok *formatToken, s string) { + flags := &tok.flags + s = in.truncate(tok, s) + + if flags.sharp && strconv.CanBackquote(s) { + in.formatPadString(tok, "`"+s+"`") + return + } + + buf := in.fmtBuf[:0] + if flags.plus { + buf = strconv.AppendQuoteToASCII(buf, s) + } else { + buf = strconv.AppendQuote(buf, s) + } + + in.formatPad(tok, buf) +} + +func (in *interpreter) fmtFloat(tok *formatToken, f float64, sz int) { + verb := tok.verb + switch verb { + case 'v': + in.fmtFloatBase(tok, f, sz, 'g', -1) + case 'b', 'g', 'G', 'x', 'X': + in.fmtFloatBase(tok, f, sz, verb, -1) + case 'f', 'e', 'E': + in.fmtFloatBase(tok, f, sz, verb, 6) + case 'F': + in.fmtFloatBase(tok, f, sz, 'f', 6) + default: + in.formatBadVerb(tok) + } +} + +func (in *interpreter) fmtFloatBase(tok *formatToken, f float64, sz int, verb rune, precision int) { + flags := &tok.flags + if flags.hasPrecision { + precision = tok.precision + } + + // format number + ensure sign is always present + buf := strconv.AppendFloat(in.fmtBuf[:1], f, byte(verb), precision, sz) + if buf[1] == '-' || buf[1] == '+' { + buf = buf[1:] + } else { + buf[0] = '+' + } + + // make '+' sign optional + if buf[0] == '+' && flags.space && !flags.plus { + buf[0] = ' ' + } + + // Ensure Inf and NaN values are not padded with '0' + if buf[1] == 'I' || buf[1] == 'N' { + if buf[1] == 'N' && !flags.space && !flags.plus { + buf = buf[1:] + } + in.formatPadWidth(tok, buf, ' ') + return + } + + // requred post-processing if '#' is set. + // -> print decimal point and retain/restore trailing zeros + if flags.sharp && verb != 'b' { + digits := 0 + switch verb { + case 'v', 'g', 'G': + digits = precision + if digits < 0 { + digits = 6 + } + } + + // expBuf holds the exponent, so we can overwrite the current + // buffer with the decimal + var expBuf [5]byte + exp := expBuf[:0] + + hasDecimal := false + for i := 1; i < len(buf); i++ { + switch buf[i] { + case '.': + hasDecimal = true + case 'p', 'P': + exp = append(exp, buf[i:]...) + buf = buf[:i] + case 'e', 'E': + if verb != 'x' && verb != 'X' { + exp = append(exp, buf[i:]...) + buf = buf[:i] + break + } + fallthrough + default: + digits-- + } + } + if !hasDecimal { + buf = append(buf, '.') + } + for digits > 0 { + buf = append(buf, '0') + digits-- + } + buf = append(buf, exp...) 
+ } + + // print number with sign + if flags.plus || buf[0] != '+' { + if flags.zero && flags.hasWidth && tok.width > len(buf) { + in.p.WriteByte(buf[0]) + in.formatPadding(tok, tok.width-len(buf)) + in.p.Write(buf[1:]) + return + } + + in.formatPad(tok, buf) + return + } + + // print positive number without sign + in.formatPad(tok, buf[1:]) +} + +func (in *interpreter) fmtComplex(tok *formatToken, v complex128, sz int) { + switch tok.verb { + case 'v', 'b', 'g', 'G', 'x', 'X', 'f', 'F', 'e', 'E': + break + default: + in.formatBadVerb(tok) + } + + in.p.WriteByte('(') + in.fmtFloat(tok, real(v), sz/2) + + iTok := *tok + iTok.flags.plus = true + in.fmtFloat(&iTok, imag(v), sz/2) + in.p.WriteString("i)") +} + +// truncate returns the number of configured unicode symbols. +func (in *interpreter) truncate(tok *formatToken, s string) string { + if tok.flags.hasPrecision { + n := tok.precision + for i := range s { // handle utf-8 with help of range loop + n-- + if n < 0 { + return s[:i] + } + } + } + return s +} + +func (in *interpreter) fmtStr(tok *formatToken, s string) { + in.formatPadString(tok, in.truncate(tok, s)) +} + +func (in *interpreter) fmtPointer(tok *formatToken, value reflect.Value) { + verb := tok.verb + + var u uintptr + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + u = value.Pointer() + default: + in.formatBadVerb(tok) + return + } + + switch verb { + case 'v': + if tok.flags.sharpV { + in.p.WriteByte('(') + in.p.WriteString(value.Type().String()) + in.p.WriteString(")(") + if u == 0 { + in.p.WriteString("nil") + } else { + in.fmtHex64(tok, uint64(u), true) + } + in.p.WriteByte(')') + } else if u != 0 { + in.fmtHex64(tok, uint64(u), !tok.flags.sharp) + } else { + in.formatPadString(tok, "") + } + case 'p': + in.fmtHex64(tok, uint64(u), !tok.flags.sharp) + case 'b', 'o', 'd', 'x', 'X': + in.fmtInt(tok, uint64(u), false) + default: + in.formatBadVerb(tok) + } +} + +func (in *interpreter) formatBadVerb(tok *formatToken) { + st := &in.st + st.inError = true + + in.p.WriteString("%!") + in.p.WriteRune(tok.verb) + in.p.WriteByte('(') + + switch { + case st.arg != nil: + in.p.WriteString(reflect.TypeOf(st.arg).String()) + in.p.WriteByte('=') + tmpTok := *tok + tmpTok.verb = 'v' + in.formatArg(&tmpTok, st.arg) + case st.val.IsValid(): + in.p.WriteString(st.val.Type().String()) + in.p.WriteByte('=') + tmpTok := *tok + tmpTok.verb = 'v' + in.fmtValue(&tmpTok, st.val, 0) + default: + in.p.WriteString("") + } + + in.p.WriteByte(')') + st.inError = false +} + +func (in *interpreter) formatPad(tok *formatToken, b []byte) { + if !tok.flags.hasWidth || tok.width == 0 { + in.p.Write(b) + return + } + + width := tok.width - utf8.RuneCount(b) + if !tok.flags.minus { + in.formatPadding(tok, width) + in.p.Write(b) + } else { + in.p.Write(b) + in.formatPadding(tok, width) + } +} + +func (in *interpreter) formatPadString(tok *formatToken, s string) { + if !tok.flags.hasWidth || tok.width == 0 { + in.p.WriteString(s) + return + } + + width := tok.width - utf8.RuneCountInString(s) + if !tok.flags.minus { + in.formatPadding(tok, width) + in.p.WriteString(s) + } else { + in.p.WriteString(s) + in.formatPadding(tok, width) + } +} + +func (in *interpreter) fmtPaddingZeros(n int) { + in.formatPaddingWith(n, '0') +} + +func (in *interpreter) formatPadWidth(tok *formatToken, b []byte, padByte byte) { + if !tok.flags.hasWidth || tok.width == 0 { + in.p.Write(b) + return + } + + width := tok.width - utf8.RuneCount(b) + if 
!tok.flags.minus { + in.formatPaddingWith(width, padByte) + in.p.Write(b) + } else { + in.p.Write(b) + in.formatPaddingWith(width, padByte) + } +} + +func (in *interpreter) formatPadding(tok *formatToken, n int) { + if tok.flags.zero { + in.formatPaddingWith(n, '0') + } else { + in.formatPaddingWith(n, ' ') + } +} + +func (in *interpreter) formatPaddingWith(n int, padByte byte) { + if n <= 0 { + return + } + + var padBuf [128]byte + for n > 0 { + buf := padBuf[:] + if n < len(padBuf) { + buf = buf[:n] + } + for i := range buf { + buf[i] = padByte + } + + in.p.Write(buf) + n -= len(buf) + } +} + +// Width returns the value of the width option and whether it has been set. +func (f *formatterState) Width() (wid int, ok bool) { + return f.tok.width, f.tok.flags.hasWidth +} + +// Precision returns the value of the precision option and whether it has been set. +func (f *formatterState) Precision() (prec int, ok bool) { + return f.tok.precision, f.tok.flags.hasPrecision +} + +// Flag reports whether the flag c, a character, has been set. +func (f *formatterState) Flag(c int) bool { + flags := &f.tok.flags + switch c { + case '-': + return flags.minus + case '+': + return flags.plus || flags.plusV + case '#': + return flags.sharp || flags.sharpV + case ' ': + return flags.space + case '0': + return flags.zero + default: + return false + } +} diff --git a/vendor/github.com/urso/diag/ctxfmt/parse.go b/vendor/github.com/urso/diag/ctxfmt/parse.go new file mode 100644 index 00000000000..775450b82e4 --- /dev/null +++ b/vendor/github.com/urso/diag/ctxfmt/parse.go @@ -0,0 +1,279 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package ctxfmt + +import "unicode/utf8" + +type parser struct { + handler tokenHandler +} + +type tokenHandler interface { + onString(s string) + onToken(tok formatToken) + onParseError(formatToken, error) +} + +type formatToken struct { + field string + verb rune + width int + precision int + flags flags +} + +type flags struct { + named bool + hasWidth bool + hasPrecision bool + plus bool + plusV bool + minus bool + sharp bool + sharpV bool + space bool + zero bool +} + +var validVerbs [256]bool + +func init() { + for _, v := range "vtTbcdoOqxXUeEfFgGsqxXp" { + validVerbs[v] = true + } +} + +func (p *parser) parse(msg string) { + var i int + end := len(msg) + for i < end { + var ( + lasti = i + err error + ) + + i = findFmt(msg, i, end) + if i >= end { + i = lasti + break + } + if i+1 == end { + if i > lasti { + p.handler.onString(msg[lasti:i]) + } + p.handler.onParseError(formatToken{}, errNoVerb) + return + } + + // found escaped '%'. 
Report string printing '%' and ignore current '%' + if msg[i+1] == '%' { + p.handler.onString(msg[lasti : i+1]) + i += 2 + continue + } + + if i > lasti { + p.handler.onString(msg[lasti:i]) + } + + var tok formatToken + i, tok, err = parseFmt(msg, i, end) + if err != nil { + p.handler.onParseError(tok, err) + } else if tok.verb > utf8.RuneSelf || !validVerbs[tok.verb] { + p.handler.onParseError(tok, errInvalidVerb) + } else { + if tok.verb == 'v' { + tok.flags.sharpV = tok.flags.sharp + tok.flags.plusV = tok.flags.plus + tok.flags.sharp = false + tok.flags.plus = false + } + p.handler.onToken(tok) + } + } + + if i < end { + p.handler.onString(msg[i:]) + } +} + +func findFmt(in string, start, end int) (i int) { + for i = start; i < end && in[i] != '%'; { + i++ + } + return i +} + +func parseFmt(msg string, start, end int) (i int, tok formatToken, err error) { + i = start + 1 + if i < end && msg[i] == '{' { + return parseField(msg, start, end) + } + + i, err = parseFmtSpec(&tok, msg, i, end) + return i, tok, err +} + +func parseFmtSpec(tok *formatToken, msg string, start, end int) (int, error) { + i := start + + // parse flags + for i < end { + newi, isflag := parseFlag(&tok.flags, msg, i) + if !isflag { + break + } + i = newi + } + + // fast path for common case of simple lower case verbs without width or + // precision. + if c := msg[i]; 'a' <= c && c <= 'z' { + tok.verb = rune(c) + return i + 1, nil + } + + // try to parse width + num, isnum, newi := parseNum(msg, i, end) + if isnum { + if !tok.flags.hasWidth { + tok.width = num + tok.flags.hasWidth = true + } + i = newi + } + + // try to parse precision + if i < end && msg[i] == '.' { + i++ + num, isnum, newi := parseNum(msg, i, end) + if isnum { + if !tok.flags.hasPrecision { + tok.precision = num + tok.flags.hasPrecision = true + } + i = newi + } else if !tok.flags.hasPrecision { + tok.precision = 0 + tok.flags.hasPrecision = true + } + } + + if i >= end { + return i, errNoVerb + } + + // parse verb + verb := rune(msg[i]) + if verb >= utf8.RuneSelf { + verb, size := utf8.DecodeRuneInString(msg[i:]) + tok.verb = verb + i += size + return i + size, errInvalidVerb + } + tok.verb = verb + + return i + 1, nil +} + +// parseField parses a named field format specifier into st. +// The syntax of a field formatter is '%{[+#@][:]}'. +// +// The prefix '+', '#', '@' modify the printing if no format is configured. +// In this case the 'v' verb is assumed. The '@' flag is synonymous to '#'. 
+// +// The 'format' section can be any valid format specification +func parseField(msg string, start, end int) (i int, tok formatToken, err error) { + tok.flags.named = true + tok.verb = 'v' // default verb for fields is 'v' + + i = start + 2 // start is at '%' + if i >= end { + return end, tok, errCloseMissing + } + + switch msg[i] { + case '+': + tok.flags.plus = true + i++ + case '#', '@': + tok.flags.sharp = true + i++ + } + + pos := i + for i < end && msg[i] != '}' && msg[i] != ':' { + i++ + } + + if pos == i { + return i, tok, errNoFieldName + } + tok.field = msg[pos:i] + + if i >= end { + return i, tok, errCloseMissing + } + + if msg[i] == '}' { + return i + 1, tok, nil + } + + // msg[i] == ':' => parse format specification + i, err = parseFmtSpec(&tok, msg, i+1, end) + if err != nil { + return i, tok, nil + } + + // skip to end of formatter: + for i < end && msg[i] != '}' { + i++ + } + if i >= end { + return end, tok, errCloseMissing + } + return i + 1, tok, nil +} + +func parseFlag(flags *flags, msg string, pos int) (int, bool) { + switch msg[pos] { + case '#': + flags.sharp = true + return pos + 1, true + case '+': + flags.plus = true + return pos + 1, true + case '-': + flags.minus = true + flags.zero = false + return pos + 1, true + case '0': + flags.zero = !flags.minus + return pos + 1, true + case ' ': + flags.space = true + return pos + 1, true + } + + return 0, false +} + +func parseNum(msg string, start, end int) (num int, isnum bool, i int) { + for i = start; i < end && '0' <= msg[i] && msg[i] <= '9'; i++ { + if tooLarge(num) { + return 0, false, end + } + num = 10*num + int(msg[i]-'0') + } + return num, i > start, i +} + +func tooLarge(i int) bool { + const max int = 1e6 + return !(-max <= i && i <= max) +} diff --git a/vendor/github.com/urso/diag/ctxfmt/print.go b/vendor/github.com/urso/diag/ctxfmt/print.go new file mode 100644 index 00000000000..49dce87c5ea --- /dev/null +++ b/vendor/github.com/urso/diag/ctxfmt/print.go @@ -0,0 +1,90 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package ctxfmt + +import ( + "io" + "unicode/utf8" +) + +type printer struct { + To io.Writer + written int + err error +} + +func (p *printer) Write(buf []byte) (int, error) { + if p.err != nil { + return 0, p.err + } + + return p.doWrite(buf) +} + +func (p *printer) doWrite(buf []byte) (int, error) { + return p.upd(p.To.Write(buf)) +} + +func (p *printer) upd(n int, err error) (int, error) { + p.written += n + p.err = err + return n, err +} + +func (p *printer) WriteByte(b byte) error { + if p.err != nil { + return p.err + } + + if bw, ok := p.To.(io.ByteWriter); ok { + err := bw.WriteByte(b) + if err != nil { + p.err = err + } else { + p.written++ + } + return err + } + + _, err := p.doWrite([]byte{b}) + return err +} + +func (p *printer) WriteString(s string) (int, error) { + if p.err != nil { + return 0, p.err + } + + if sw, ok := p.To.(interface{ WriteString(string) (int, error) }); ok { + return p.upd(sw.WriteString(s)) + } + return p.doWrite(unsafeBytes(s)) +} + +func (p *printer) WriteRune(r rune) error { + if p.err != nil { + return p.err + } + + if rw, ok := p.To.(interface{ WriteRune(rune) error }); ok { + p.err = rw.WriteRune(r) + return p.err + } + + if r < utf8.RuneSelf { + return p.WriteByte(byte(r)) + } + + var runeBuf [utf8.UTFMax]byte + n := utf8.EncodeRune(runeBuf[:], r) + _, err := p.doWrite(runeBuf[:n]) + return err +} + +func (p *printer) onString(s string) { + p.WriteString(s) +} diff --git a/vendor/github.com/urso/diag/ctxfmt/util.go b/vendor/github.com/urso/diag/ctxfmt/util.go new file mode 100644 index 00000000000..36cbd60ab74 --- /dev/null +++ b/vendor/github.com/urso/diag/ctxfmt/util.go @@ -0,0 +1,43 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package ctxfmt + +import ( + "reflect" + "unicode/utf8" + "unsafe" + + "github.com/urso/diag" +) + +func isErrorValue(v interface{}) bool { + if err, ok := v.(error); ok { + return err != nil + } + return false +} + +func unsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func unsafeBytes(s string) []byte { + sh := *((*reflect.SliceHeader)(unsafe.Pointer(&s))) + return *(*[]byte)((unsafe.Pointer)(&reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Cap})) +} + +func isFieldValue(v interface{}) bool { + _, ok := v.(diag.Field) + return ok +} + +func convRune(v uint64) rune { + if v > utf8.MaxRune { + return utf8.RuneError + } + return rune(v) +} diff --git a/vendor/github.com/urso/diag/ctxfmt/util_go11.go b/vendor/github.com/urso/diag/ctxfmt/util_go11.go new file mode 100644 index 00000000000..1338bab2a7e --- /dev/null +++ b/vendor/github.com/urso/diag/ctxfmt/util_go11.go @@ -0,0 +1,38 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+
+//+build !go1.12
+
+package ctxfmt
+
+import "reflect"
+
+type mapIter struct {
+ m reflect.Value
+ keys []reflect.Value
+ pos int
+}
+
+func newMapIter(m reflect.Value) *mapIter {
+ return &mapIter{
+ m: m,
+ keys: m.MapKeys(),
+ pos: -1,
+ }
+}
+
+func (i *mapIter) Next() bool {
+ i.pos++
+ return i.pos < len(i.keys)
+}
+
+func (i *mapIter) Key() reflect.Value {
+ return i.keys[i.pos]
+}
+
+func (i *mapIter) Value() reflect.Value {
+ return i.m.MapIndex(i.Key())
+}
diff --git a/vendor/github.com/urso/diag/ctxfmt/util_go12.go b/vendor/github.com/urso/diag/ctxfmt/util_go12.go
new file mode 100644
index 00000000000..944a08d120a
--- /dev/null
+++ b/vendor/github.com/urso/diag/ctxfmt/util_go12.go
@@ -0,0 +1,15 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+
+//+build go1.12
+
+package ctxfmt
+
+import "reflect"
+
+func newMapIter(m reflect.Value) *reflect.MapIter {
+ return m.MapRange()
+}
diff --git a/vendor/github.com/urso/diag/diag.go b/vendor/github.com/urso/diag/diag.go
new file mode 100644
index 00000000000..deda3ca0b33
--- /dev/null
+++ b/vendor/github.com/urso/diag/diag.go
@@ -0,0 +1,433 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+
+package diag
+
+import (
+ "sort"
+ "strings"
+)
+
+// Context represents the diagnostic context tree.
+// A context contains a number of user defined and standardized fields,
+// plus a reference to a 'before' context and an 'after' context.
+// The current context and the 'after' context overwrite all fields
+// written to the 'before' context.
+// The 'after' context overwrites all fields added to the 'before' or the
+// current context.
+type Context struct {
+ totUser int
+ totStd int
+ fields []Field
+ mode fieldSel
+ before, after *Context
+}
+
+// Visitor can be used to iterate all fields in a context.
+// Shadowed fields will only be reported once.
+// Use with (*Context).VisitKeyValues to collect a flattened list of key value pairs.
+// Use VisitStructured to recursively iterate the context.
+type Visitor interface {
+ OnObjStart(key string) error
+ OnObjEnd() error
+ OnValue(key string, v Value) error
+}
+
+// order represents the global flattened order of all fields in a Context tree.
+// Len() reports the number of fields in the ordering.
+//
+// The ith field is accessed via `order.ctx[i].fields[order.idx[i]]`
+type order struct {
+ idx []int // index of the i-th field within its context's 'fields' slice
+ ctx []*Context // pointer to the context the i-th field can be found in
+}
+
+// view is a temporary snapshot of a context with an applied order.
+// The view object can be used to iterate through all fields in a context.
+type view struct {
+ Ctx *Context
+ order order
+}
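For illustration (not part of the vendored sources): a minimal sketch of implementing the Visitor interface above to pretty-print a context, relying only on the diag APIs introduced in this patch (NewContext, AddFields, the field constructors from fld.go, and VisitStructured).

```
package main

import (
	"fmt"

	"github.com/urso/diag"
)

// printVisitor prints each reported field and indents nested objects.
type printVisitor struct{ indent string }

func (p *printVisitor) OnObjStart(key string) error {
	fmt.Printf("%s%s:\n", p.indent, key)
	p.indent += "  "
	return nil
}

func (p *printVisitor) OnObjEnd() error {
	p.indent = p.indent[:len(p.indent)-2]
	return nil
}

func (p *printVisitor) OnValue(key string, v diag.Value) error {
	fmt.Printf("%s%s: %v\n", p.indent, key, v.Interface())
	return nil
}

func main() {
	ctx := diag.NewContext(nil, nil)
	ctx.AddFields(diag.String("host.name", "localhost"), diag.Int("host.port", 9200))

	// VisitStructured combines the dotted keys into one 'host' object:
	//   host:
	//     name: localhost
	//     port: 9200
	_ = ctx.VisitStructured(&printVisitor{})
}
```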
+// fieldSel configures a context's field 'filtering' in case a projection
+// like (*Context).User or (*Context).Standardized has been applied to a context.
+type fieldSel uint8
+
+const (
+ allFields fieldSel = standardizedFields | userFields | fieldsClosure
+ standardizedFields fieldSel = 1 << 0 // list standardized fields
+ userFields fieldSel = 1 << 1 // list user fields
+ fieldsClosure fieldSel = 1 << 2 // use fields in before/after contexts
+)
+
+// NewContext creates a new context, adding a 'before' and an 'after' context
+// for shadowing fields. When creating a context a snapshot of the before and
+// after contexts is taken, such that they can still be manipulated without
+// affecting the current context.
+func NewContext(before, after *Context) *Context {
+ totStd, totUser := 0, 0
+ if before != nil {
+ totStd += before.totStd
+ totUser += before.totUser
+ }
+ if after != nil {
+ totStd += after.totStd
+ totUser += after.totUser
+ }
+
+ return &Context{
+ totStd: totStd,
+ totUser: totUser,
+ before: makeSnapshot(before),
+ after: makeSnapshot(after),
+ mode: allFields,
+ }
+}
+
+// Len reports the number of fields in the current context. If two fields have
+// the same key, both will be reported.
+func (c *Context) Len() int {
+ if c == nil {
+ return 0
+ }
+ return c.totUser + c.totStd
+}
+
+// Local is a projection that returns a snapshot of the current context
+// without its before and after contexts.
+func (c *Context) Local() *Context {
+ totUser, totStd := 0, 0
+ for i := range c.fields {
+ if c.fields[i].Standardized {
+ totStd++
+ } else {
+ totUser++
+ }
+ }
+
+ if (c.mode & userFields) == 0 {
+ totUser = 0
+ }
+ if (c.mode & standardizedFields) == 0 {
+ totStd = 0
+ }
+
+ return &Context{
+ totUser: totUser,
+ totStd: totStd,
+ fields: c.fields,
+ mode: c.mode &^ fieldsClosure,
+ }
+}
+
+// User is a projection that will only contain user fields. All standardized fields
+// (even in after/before contexts) will be ignored.
+func (c *Context) User() *Context {
+ return &Context{
+ totUser: c.totUser,
+ totStd: 0,
+ fields: c.fields,
+ mode: c.mode &^ standardizedFields,
+ before: c.before,
+ after: c.after,
+ }
+}
+
+// Standardized is a projection that will only contain standardized fields. All
+// user fields (even in after/before contexts) will be ignored.
+func (c *Context) Standardized() *Context {
+ return &Context{
+ totStd: c.totStd,
+ totUser: 0,
+ fields: c.fields,
+ mode: c.mode &^ userFields,
+ before: c.before,
+ after: c.after,
+ }
+}
+
+func makeSnapshot(ctx *Context) *Context {
+ if ctx.Len() == 0 {
+ return nil
+ }
+ if len(ctx.fields) > 0 {
+ return cloneContext(ctx)
+ }
+
+ if ctx.before.Len() == 0 {
+ return ctx.after
+ } else if ctx.after.Len() == 0 {
+ return ctx.before
+ } else {
+ return cloneContext(ctx)
+ }
+}
+
+func cloneContext(ctx *Context) *Context {
+ snapshot := *ctx
+ return &snapshot
+}
+
+// Add creates and adds a new user field to the current context.
+func (c *Context) Add(key string, value Value) {
+ c.AddField(Field{Key: key, Value: value})
+}
+
+// AddField adds a new field to the current context.
+func (c *Context) AddField(f Field) {
+ c.fields = append(c.fields, f)
+ if f.Standardized {
+ c.totStd++
+ } else {
+ c.totUser++
+ }
+}
+
+// AddFields adds a variable number of fields to the current context.
+func (c *Context) AddFields(fs ...Field) {
+ c.fields = append(c.fields, fs...)
+ for i := range fs {
+ if fs[i].Standardized {
+ c.totStd++
+ } else {
+ c.totUser++
+ }
+ }
+}
+
+// AddAll adds a list of fields or key value pairs to the current context.
+// For example ctx.AddAll("a", 1, diag.String("b", "test")) will
+// create a context with the two fields a=1 and b=test.
+func (c *Context) AddAll(args ...interface{}) { + for i := 0; i < len(args); { + arg := args[i] + switch v := arg.(type) { + case string: + switch val := args[i+1].(type) { + case Value: + c.Add(v, val) + default: + c.AddField(Any(v, args[i+1])) + } + + i += 2 + case Field: + c.AddField(v) + i++ + } + } +} + +// VisitKeyValues reports unique fields to the given visitor. Keys will be +// flattened, only calling the OnValue callback on the given visitor. +func (c *Context) VisitKeyValues(v Visitor) error { + view := newView(c, c.mode&fieldsClosure == 0) + return view.VisitKeyValues(v) +} + +// VisitStructured reports the context its structure to the visitor. +// Fields having the same prefix separated by dots will be combined into +// a common object. +func (c *Context) VisitStructured(v Visitor) error { + view := newView(c, c.mode&fieldsClosure == 0) + return view.VisitStructured(v) +} + +func newView(ctx *Context, localOnly bool) *view { + v := &view{Ctx: ctx} + v.order.init(ctx, localOnly, true, true) + return v +} + +func (view *view) VisitKeyValues(v Visitor) error { + o := &view.order + L := o.Len() + + for i := 0; i < L; i++ { + ctx, idx := o.ctx[i], o.idx[i] + fld := &ctx.fields[idx] + key := fld.Key + + if j := i + 1; j < L { + other := o.key(j) + if key == other { + continue // ignore older duplicates + } + + if strings.HasPrefix(other, key) && other[len(key)] == '.' { + continue // ignore value if it's overwritten by an object + } + } + + if err := v.OnValue(key, fld.Value); err != nil { + return err + } + } + + return nil +} + +func (view *view) VisitStructured(v Visitor) error { + o := &view.order + L := o.Len() + + objPrefix := "" + level := 0 + + // TODO: this function keeps track of the number of ObjStart and ObjEnd events + // by scanning for `.` in the fields names. + // Instead of scanning let's check if we can store the string index in + // a slice (use as a stack) or if we should try to use recursion in order + // to track the 'index' stack on the go-routine stack itself. + + for i := 0; i < L; i++ { + ctx, idx := o.ctx[i], o.idx[i] + fld := &ctx.fields[idx] + key := fld.Key + + if j := i + 1; j < L { + other := o.key(j) + if key == other { + continue // ignore older duplicates + } + + if strings.HasPrefix(other, key) && other[len(key)] == '.' { + continue // ignore value if it's overwritten by an object + } + } + + // decrease object level until last and current key have same path prefix + if L := commonPrefix(key, objPrefix); L < len(objPrefix) { + for L > 0 && key[L-1] != '.' 
{ + L-- + } + + // remove levels + if L > 0 { + for delta := objPrefix[L:]; len(delta) > 0; { + idx := strings.IndexRune(delta, '.') + if idx < 0 { + break + } + + delta = delta[idx+1:] + level-- + if err := v.OnObjEnd(); err != nil { + return err + } + } + + objPrefix = key[:L] + } else { + for ; level > 0; level-- { + if err := v.OnObjEnd(); err != nil { + return err + } + } + objPrefix = "" + } + } + + // increase object level + for { + start := len(objPrefix) + idx := strings.IndexRune(key[start:], '.') + if idx < 0 { + break + } + + level++ + objPrefix = key[:len(objPrefix)+idx+1] + if err := v.OnObjStart(key[start : start+idx]); err != nil { + return err + } + } + + k := key[len(objPrefix):] + if err := v.OnValue(k, fld.Value); err != nil { + return err + } + } + + for ; level > 0; level-- { + if err := v.OnObjEnd(); err != nil { + return err + } + } + + return nil +} + +func (o *order) init(ctx *Context, localOnly, user, std bool) { + l := ctx.Len() + if l == 0 { + return + } + + o.idx = make([]int, l) + o.ctx = make([]*Context, l) + + n := index(o, ctx, localOnly, user, std) + o.idx = o.idx[:n] + o.ctx = o.ctx[:n] + sort.Stable(o) +} + +func index(o *order, ctx *Context, localOnly, user, std bool) int { + pos := 0 + user = user && (ctx.mode&userFields) == userFields + std = std && (ctx.mode&standardizedFields) == standardizedFields + + if !localOnly { + if L := ctx.before.Len(); L > 0 { + pos += index(o, ctx.before, false, user, std) + } + } + + for i := range ctx.fields { + idxField := (user && std) || + (user && !ctx.fields[i].Standardized) || + (std && ctx.fields[i].Standardized) + if !idxField { + continue + } + + o.idx[pos] = i + o.ctx[pos] = ctx + pos++ + } + + if !localOnly { + if L := ctx.after.Len(); L > 0 { + tmp := &order{idx: o.idx[pos:], ctx: o.ctx[pos:]} + pos += index(tmp, ctx.after, false, user, std) + } + } + + return pos +} + +func (o *order) key(i int) string { return o.ctx[i].fields[o.idx[i]].Key } +func (o *order) Len() int { return len(o.idx) } +func (o *order) Less(i, j int) bool { return o.key(i) < o.key(j) } +func (o *order) Swap(i, j int) { + o.idx[i], o.idx[j] = o.idx[j], o.idx[i] + o.ctx[i], o.ctx[j] = o.ctx[j], o.ctx[i] +} + +func commonPrefix(a, b string) int { + end := len(a) + if alt := len(b); alt < end { + end = alt + } + + for i := 0; i < end; i++ { + if a[i] != b[i] { + return i + } + } + return end +} diff --git a/vendor/github.com/urso/diag/doc.go b/vendor/github.com/urso/diag/doc.go new file mode 100644 index 00000000000..8a7ccd500c9 --- /dev/null +++ b/vendor/github.com/urso/diag/doc.go @@ -0,0 +1,40 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +// Package diag provides a diagnostic context that can be used to record +// contextual information about the current scope, that needs to be reported. +// Diagnostic contexts can be used for logging, to add context when wrapping +// errors, or to pass additional information with data/events/objects passed +// through multiple subsystems. +// +// Contexts are represented as trees. A Context has a 'before'-Context and an +// 'after'-Context. The order of contexts define the shadowing of fields in +// case multiple contexts reported different values for the same field. All +// fields in the 'after'-Context always overwrite fields in the current context +// node and fields in the 'before'-Context. 
Fields in the 'before'-Context are
+// always shadowed by the current Context node and the 'after'-Context.
+//
+// The diag package differentiates between standardized and user defined fields.
+// Although diag does not define any standardized fields, libraries and users are
+// encouraged to create constructors for standardized fields.
+// Standardized fields ensure that consistent field names and types are used by programmers.
+// Constructors can add some type-safety for post-processing, or for storing a
+// diagnostic context's state in databases or other storage systems that require
+// a schema.
+//
+// One can define a standardized "Host" field like this:
+//
+// package myfields
+//
+// func Host(name string) diag.Field {
+// return diag.Field{Standardized: true, Key: "host", Value: diag.ValString(name)}
+// }
+//
+// The fields can be used with a context like this:
+//
+// ctx.AddField(myfields.Host("localhost"))
+//
+package diag
diff --git a/vendor/github.com/urso/diag/fld.go b/vendor/github.com/urso/diag/fld.go
new file mode 100644
index 00000000000..d50a6d133d4
--- /dev/null
+++ b/vendor/github.com/urso/diag/fld.go
@@ -0,0 +1,59 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+
+package diag
+
+import (
+ "time"
+)
+
+// Field to be stored in a context.
+type Field struct {
+ Key string
+ Value Value
+
+ // Standardized indicates that the field's key and value are standardized
+ // according to some external schema. Consumers of a context might decide to
+ // handle Standardized and non-standardized fields differently.
+ Standardized bool
+}
+
+func userField(k string, v Value) Field {
+ return Field{Key: k, Value: v}
+}
+
+// Bool creates a new user-field storing a bool.
+func Bool(key string, b bool) Field { return userField(key, ValBool(b)) }
+
+// Int creates a new user-field storing an int.
+func Int(key string, i int) Field { return userField(key, ValInt(i)) }
+
+// Int64 creates a new user-field storing an int64 value.
+func Int64(key string, i int64) Field { return userField(key, ValInt64(i)) }
+
+// Uint creates a new user-field storing a uint.
+func Uint(key string, i uint) Field { return userField(key, ValUint(i)) }
+
+// Uint64 creates a new user-field storing a uint64.
+func Uint64(key string, i uint64) Field { return userField(key, ValUint64(i)) }
+
+// Float creates a new user-field storing a float.
+func Float(key string, f float64) Field { return userField(key, ValFloat(f)) }
+
+// Duration creates a new user-field storing a duration.
+func Duration(key string, dur time.Duration) Field { return userField(key, ValDuration(dur)) }
+
+// Timestamp creates a new user-field storing a time value.
+func Timestamp(key string, ts time.Time) Field { return userField(key, ValTime(ts)) }
+
+// Any creates a new user-field storing any value as interface.
+func Any(key string, ifc interface{}) Field { + // TODO: use type switch + reflection to select concrete Field + return userField(key, ValAny(ifc)) +} diff --git a/vendor/github.com/urso/diag/go.mod b/vendor/github.com/urso/diag/go.mod new file mode 100644 index 00000000000..9433447cc46 --- /dev/null +++ b/vendor/github.com/urso/diag/go.mod @@ -0,0 +1,5 @@ +module github.com/urso/diag + +go 1.13 + +require github.com/google/go-cmp v0.4.0 diff --git a/vendor/github.com/urso/diag/go.sum b/vendor/github.com/urso/diag/go.sum new file mode 100644 index 00000000000..7a664bef90b --- /dev/null +++ b/vendor/github.com/urso/diag/go.sum @@ -0,0 +1,4 @@ +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/urso/diag/gocontext.go b/vendor/github.com/urso/diag/gocontext.go new file mode 100644 index 00000000000..1b32e6b568b --- /dev/null +++ b/vendor/github.com/urso/diag/gocontext.go @@ -0,0 +1,63 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package diag + +import "context" + +type key int + +// diagContextKey is the key for diag.Context values in context.Contexts. +// It is unexported; clients should use DiagnosticsFrom, and NewDiagnostics. +var diagContextKey key + +// NewDiagnostics adds a diagnostics context to a context.Context value. +// The old diagnostic context will be shadowed if the context.Context already +// contains a diagnostics context. +func NewDiagnostics(ctx context.Context, dc *Context) context.Context { + return context.WithValue(ctx, diagContextKey, dc) +} + +// DiagnosticsFrom extracts a diagnostic context from context.Context. +func DiagnosticsFrom(ctx context.Context) (*Context, bool) { + tmp := ctx.Value(diagContextKey) + if tmp == nil { + return nil, false + } + + dc, ok := tmp.(*Context) + return dc, ok +} + +// PushFields adds a new diagnostics context with the given set of fields +// to a context.Context value. The new diagnostic context references the +// existing diagnostic context, if one exists (fields will be combined). +func PushFields(ctx context.Context, fields ...Field) context.Context { + ctx, dc := extendDiagnostics(ctx) + dc.AddFields(fields...) + return ctx +} + +// PushDiagnostics adds a new diagnostics context with the given fields to a +// context.Context value (see (*Context).AddAll). The new diagnostic context +// references the existing diagnostic context, if one exists (fields will be +// combined). +func PushDiagnostics(ctx context.Context, args ...interface{}) context.Context { + ctx, dc := extendDiagnostics(ctx) + dc.AddAll(args...) 
+ return ctx +} + +func extendDiagnostics(ctx context.Context) (context.Context, *Context) { + dc, ok := DiagnosticsFrom(ctx) + if ok { + dc = NewContext(dc, nil) + } else { + dc = NewContext(nil, nil) + } + + return NewDiagnostics(ctx, dc), dc +} diff --git a/vendor/github.com/urso/diag/value.go b/vendor/github.com/urso/diag/value.go new file mode 100644 index 00000000000..e64b8f7a0b4 --- /dev/null +++ b/vendor/github.com/urso/diag/value.go @@ -0,0 +1,165 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package diag + +import ( + "math" + "time" +) + +// Value represents a reportable value to be stored in a Field. +// The Value struct provides a slot for primitive values that require only +// 64bits, a string, or an arbitrary interface. The interpretation of the slots is up to the Reporter. +type Value struct { + Primitive uint64 + String string + Ifc interface{} + + Reporter Reporter +} + +// Reporter defines the type and supports unpacking, querying the decoded Value. +type Reporter interface { + Type() Type + + // Ifc decodes the Value and reports the decoded value to as `interface{}` + // to the provided callback. + Ifc(*Value, func(interface{})) +} + +// Type represents the possible types a Value can have. +type Type uint8 + +const ( + IfcType Type = iota + BoolType + IntType + Int64Type + Uint64Type + Float64Type + DurationType + TimestampType + StringType +) + +// Interface decodes and returns the value stored in Value. +func (v *Value) Interface() (ifc interface{}) { + v.Reporter.Ifc(v, func(tmp interface{}) { + ifc = tmp + }) + return ifc +} + +// ValBool creates a new Value representing a bool. +func ValBool(b bool) Value { + var x uint64 + if b { + x = 1 + } + return Value{Primitive: x, Reporter: _boolReporter} +} + +type boolReporter struct{} + +var _boolReporter Reporter = boolReporter{} + +func (boolReporter) Type() Type { return BoolType } +func (boolReporter) Ifc(v *Value, fn func(interface{})) { fn(bool(v.Primitive != 0)) } + +// ValInt create a new Value representing an int. +func ValInt(i int) Value { return Value{Primitive: uint64(i), Reporter: _intReporter} } + +type intReporter struct{} + +var _intReporter Reporter = intReporter{} + +func (intReporter) Type() Type { return IntType } +func (intReporter) Ifc(v *Value, fn func(interface{})) { fn(int(v.Primitive)) } + +// ValInt64 creates a new Value representing an int64. +func ValInt64(i int64) Value { return Value{Primitive: uint64(i), Reporter: _int64Reporter} } + +type int64Reporter struct{} + +var _int64Reporter Reporter = int64Reporter{} + +func (int64Reporter) Type() Type { return Int64Type } +func (int64Reporter) Ifc(v *Value, fn func(v interface{})) { fn(int64(v.Primitive)) } + +// ValUint creates a new Value representing an uint. +func ValUint(i uint) Value { return ValUint64(uint64(i)) } + +// ValUint64 creates a new Value representing an uint64. +func ValUint64(u uint64) Value { return Value{Primitive: u, Reporter: _uint64Reporter} } + +type uint64Reporter struct{} + +var _uint64Reporter Reporter = uint64Reporter{} + +func (uint64Reporter) Type() Type { return Int64Type } +func (uint64Reporter) Ifc(v *Value, fn func(v interface{})) { fn(uint64(v.Primitive)) } + +// ValFloat creates a new Value representing a float. 
+func ValFloat(f float64) Value { + return Value{Primitive: math.Float64bits(f), Reporter: _float64Reporter} +} + +type float64Reporter struct{} + +var _float64Reporter Reporter = float64Reporter{} + +func (float64Reporter) Type() Type { return Float64Type } +func (float64Reporter) Ifc(v *Value, fn func(v interface{})) { fn(math.Float64frombits(v.Primitive)) } + +// ValString creates a new Value representing a string. +func ValString(str string) Value { return Value{String: str, Reporter: _strReporter} } + +type strReporter struct{} + +var _strReporter Reporter = strReporter{} + +func (strReporter) Type() Type { return StringType } +func (strReporter) Ifc(v *Value, fn func(v interface{})) { fn(v.String) } + +// ValDuration creates a new Value representing a duration. +func ValDuration(dur time.Duration) Value { + return Value{Primitive: uint64(dur), Reporter: _durReporter} +} + +type durReporter struct{} + +var _durReporter Reporter = durReporter{} + +func (durReporter) Type() Type { return DurationType } +func (durReporter) Ifc(v *Value, fn func(v interface{})) { fn(time.Duration(v.Primitive)) } + +// ValTime creates a new Value representing a timestamp. +func ValTime(ts time.Time) Value { + return Value{Ifc: ts, Reporter: _timeReporter} +} + +type timeReporter struct{} + +var _timeReporter Reporter = timeReporter{} + +func (timeReporter) Type() Type { return TimestampType } +func (timeReporter) Ifc(v *Value, fn func(v interface{})) { + fn(v.Ifc) +} + +// ValAny creates a new Value representing any value as interface. +func ValAny(ifc interface{}) Value { return Value{Ifc: ifc, Reporter: _anyReporter} } +func reportAny(v *Value, fn func(v interface{})) { + fn(v.Ifc) +} + +type anyReporter struct{} + +var _anyReporter Reporter = anyReporter{} + +func (anyReporter) Type() Type { return IfcType } +func (anyReporter) Ifc(v *Value, fn func(v interface{})) { fn(v.Ifc) } diff --git a/vendor/github.com/urso/ecslog/.gitignore b/vendor/github.com/urso/ecslog/.gitignore new file mode 100644 index 00000000000..f1c181ec9c5 --- /dev/null +++ b/vendor/github.com/urso/ecslog/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/urso/ecslog/.travis.yml b/vendor/github.com/urso/ecslog/.travis.yml new file mode 100644 index 00000000000..8263ed9772f --- /dev/null +++ b/vendor/github.com/urso/ecslog/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - '1.11' + - '1.12' + - '1.13' + - 'tip' diff --git a/vendor/github.com/urso/ecslog/LICENSE b/vendor/github.com/urso/ecslog/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/urso/ecslog/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
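The `Value`/`Reporter` pair defined in value.go above is extensible: custom value types only need their own Reporter. A short, hypothetical sketch following the same pattern as the built-in reporters (the `ValError`/`Err` names are illustrative and not part of the package):

```
package main

import (
	"errors"
	"fmt"

	"github.com/urso/diag"
)

// errReporter mirrors the built-in reporters in value.go: it tags the
// value type and unpacks the interface slot of a Value.
type errReporter struct{}

var _ diag.Reporter = errReporter{}

func (errReporter) Type() diag.Type                         { return diag.IfcType }
func (errReporter) Ifc(v *diag.Value, fn func(interface{})) { fn(v.Ifc) }

// ValError creates a Value storing an error in the interface slot.
func ValError(err error) diag.Value {
	return diag.Value{Ifc: err, Reporter: errReporter{}}
}

// Err creates a user-field storing an error, analogous to diag.String etc.
func Err(key string, err error) diag.Field {
	return diag.Field{Key: key, Value: ValError(err)}
}

func main() {
	fld := Err("error.cause", errors.New("connection reset"))
	fmt.Println(fld.Key, "=", fld.Value.Interface())
}
```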
diff --git a/vendor/github.com/urso/ecslog/README.md b/vendor/github.com/urso/ecslog/README.md
new file mode 100644
index 00000000000..f0fdd4b352e
--- /dev/null
+++ b/vendor/github.com/urso/ecslog/README.md
@@ -0,0 +1,470 @@
+# ecslog
+
+ecslog is an experimental structured logger for the Go programming language.
+
+**TOC**
+- [Structure](#structure)
+- [Concepts](#concepts)
+  * [Fields](#fields)
+  * [Context](#context)
+  * [Capturing Format strings](#capturing-format-strings)
+  * [Errors](#errors)
+- [Use genfields](#use-genfields)
+
+The aim of this project is to create a type-safe logger generating log events which
+are fully compatible with the [Elastic Common Schema
+(ECS)](https://github.com/elastic/ecs). ECS defines a common set of fields for
+collecting, processing, and ingesting data within the [Elastic Stack](https://www.elastic.co/guide/en/elastic-stack/current/elastic-stack.html#elastic-stack).
+
+Logs should be available for consumption by developers, operators, and any kind
+of automated processing (index for search, store in databases, security
+analysis, alerting).
+
+While developers want to add additional state to log messages for
+troubleshooting, other users might not gain much value from unexplained
+internal state being printed. First and foremost logs should be
+self-explanatory messages.
+Yet in the presence of micro-services and highly multithreaded applications,
+standardized context information is mandatory for filtering and correlating
+relevant log messages by machine, service, thread, API call, or user.
+
+Ideally automated processes should not have to deal with parsing the actual
+message. Messages can easily change between releases and are best ignored by
+automation. We can and should provide as much insight into our logs as possible with
+the help of additional meta-data used to annotate the log message.
+
+Using untyped and schemaless structured logging, we put automation at
+risk of breaking, or require operators to adapt transformations every now and then.
+There is always the chance of developers removing or renaming fields, or using
+the same field names with values of different types. Some consequences of
+undetected schema changes are:
+- A subset of logs might not be indexable in an Elasticsearch index anymore, due
+  to mapping conflicts for example.
+- Scripts/Applications report errors or crash due to unexpected types.
+- Analysers produce wrong results because expected fields have gone missing or new ones have been added.
+
+Creating logs based on a common schema like ECS helps in defining and
+guaranteeing a common log structure that many different stakeholders can rely on
+(See [What are the benefits of using ECS?](https://github.com/elastic/ecs#what-are-the-benefits-of-using-ecs).).
+ECS defines many common fields, but is still extensible (See
+[Fields](https://github.com/elastic/ecs#fields)). ECS defines a core level and
+an extended level, and [reserves some common
+namespaces](https://github.com/elastic/ecs#reserved-section-names). It is not
+fully enclosed, but meant to be extended to fit an
+application's/organization's needs.
+
+ecslog distinguishes between standardized and user(developer) provided fields.
+The standardized fields are made type-safe by providing developers with
+field constructors. These are checked at compile time and guarantee that the
+correct names will be used when publishing the structured log messages.
+
+ECS [defines its schema in yaml files](https://github.com/elastic/ecs/tree/master/schemas).
+These files are compatible with the `fields.yml` files that are also used in the Elastic
+Beats project. Among other things, Beats already generates documentation, Kibana index
+patterns, and [Elasticsearch Index Templates](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html)
+from these definitions.
+
+ecslog reuses the definitions provided by ECS to generate the code for the
+type-safe ECS compatible field constructors (See [tool
+sources](https://github.com/urso/ecslog/tree/master/cmd/genfields)).
+
+Using the available definitions and tools it is possible to create log events
+which are normalized and can be stored in Elasticsearch as is.
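A rough, hypothetical sketch of the shape of such generated constructors, modeled on the `Host` example from the diag package's doc.go (the real generated code lives in the `./fld/ecs` package listed below and may differ):

```
package ecs

import "github.com/urso/diag"

type agentNS struct{}

// Agent provides type-safe constructors for the ECS 'agent.*' fields.
var Agent = agentNS{}

// Name creates the standardized 'agent.name' field.
func (agentNS) Name(name string) diag.Field {
	return diag.Field{Standardized: true, Key: "agent.name", Value: diag.ValString(name)}
}
```

A call like `ecs.Agent.Name("myapp")` then yields a standardized field that backends place at the root of the emitted document (see the Fields section below).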
+
+ECS [defines its schema in yaml files](https://github.com/elastic/ecs/tree/master/schemas).
+These files are compatible with `fields.yml` files, which are also used in the
+Elastic Beats project. Among other things, Beats already generates
+documentation, Kibana index patterns, and [Elasticsearch index
+templates](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html)
+based on these definitions.
+
+ecslog reuses the definitions provided by ECS to generate the code for the
+type-safe, ECS-compatible field constructors (See [tool
+sources](https://github.com/urso/ecslog/tree/master/cmd/genfields)).
+
+Using the available definitions and tools, it is possible to create log events
+that are normalized and can be stored in Elasticsearch as is.
+
+
+## Structure
+
+**Packages**:
+- **.**: Top level package defining the public logger.
+- **./backend**: logger backend interface definitions and composable implementations for building actual logging outputs.
+- **./ctxtree**: internal representation of log and error contexts.
+- **./fld**: Support for fields.
+- **./fld/ecs**: ECS field constructors.
+- **./errx**: Error support package with support for:
+  - wrapping/annotating errors with additional context
+  - querying errors by predicate, contents, type
+  - walking the trace/tree of errors
+
+## Concepts
+
+### Fields
+
+ecslog differentiates between standardized fields and user fields. We provide
+type-safe constructors for standardized fields, but user-defined fields are
+not necessarily type-safe and often carry additional debug information for
+consumption by the actual developer. Consumers of logs should be prepared to
+remove user fields from log messages if necessary.
+
+The structured logging backends mix in standardized fields as is, right at the
+root of the document/event to be generated. User fields are prefixed by
+`fields.`.
+
+This log statement, using the standardized `ecs.agent.name` field and the
+user-defined `myfield`:
+
+```
+  log.With(
+      // ECS standardized field
+      ecs.Agent.Name("myapp"),
+
+      // user field
+      "myfield", "test",
+  ).Info("info message")
+```
+
+produces this JSON document:
+
+```
+  {
+    ...
+    "agent": {
+      "name": "myapp"
+    },
+    "fields": {
+      "myfield": "test"
+    },
+    "log": {
+      ...
+    },
+    "message": "info message"
+  }
+```
+
+### Context
+
+The logger's context is implemented by the **ctxtree** package.
+Fields can only be added to a context, not removed or updated.
+
+A field added twice to a context will be reported only once, ensuring tools
+operating on the log message always receive a well-defined JSON document.
+Calling:
+
+```
+  log.With("field", 1, "field", 2).Info("hello world")
+```
+
+or:
+
+```
+  log.With("field", 1).With("field", 2).Info("hello world")
+```
+
+produces:
+
+```
+  {
+    ...
+    "fields": {
+      "field": 2
+    },
+    "log": {
+      ...
+    },
+    "message": "hello world"
+  }
+```
+
+Internally the context is represented as a tree. Within one node in the tree,
+fields are ordered by the order they've been added to the context.
+When creating a context, one can pass a 'predecessor' and a 'successor' to the
+context. A snapshot of the current state of these contexts will be used, so as
+to allow concurrent use of contexts.
+
+The order of fields in a context-tree is determined by a depth-first traversal
+of all contexts in the tree. This is used to link contexts between loggers
+top-down, while linking contexts of error values from the bottom upwards.
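+
+To illustrate the snapshot and de-duplication semantics described above, here
+is a minimal sketch against the vendored `github.com/urso/diag` package (the
+visitor interface is the one used by the bundled text layout; the printed
+output is what the rules above imply):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/urso/diag"
+)
+
+// printer implements the key/value visitor accepted by diag.Context.
+type printer struct{}
+
+func (printer) OnObjStart(key string) error { fmt.Printf("%v={", key); return nil }
+func (printer) OnObjEnd() error             { fmt.Print("} "); return nil }
+func (printer) OnValue(key string, v diag.Value) error {
+	v.Reporter.Ifc(&v, func(val interface{}) { fmt.Printf("%v=%v", key, val) })
+	return nil
+}
+
+func main() {
+	predecessor := diag.NewContext(nil, nil)
+	predecessor.AddField(diag.String("field", "one"))
+
+	// The new context snapshots its predecessor; the duplicate key is
+	// reported only once, with the value added last winning.
+	ctx := diag.NewContext(predecessor, nil)
+	ctx.AddField(diag.String("field", "two"))
+
+	ctx.VisitKeyValues(printer{}) // expected output: field=two
+	fmt.Println()
+}
+```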
+
+### Capturing Format strings
+
+The logging methods `Tracef`, `Debugf`, `Infof`, and `Errorf` require a format
+string as their first argument. The intent of these methods is to create
+readable and explanatory messages.
+
+The format strings supported are mostly similar to the fmt.Printf family, but
+add support for capturing additional user fields in the current log context:
+
+```
+  log.Errorf("Can not open '%{file}'.", "file.txt")
+```
+
+produces this document:
+
+```
+{
+  ...
+  "fields": {
+    "file": "file.txt"
+  },
+  "log": {
+    ...
+  },
+  "message": "Can not open 'file.txt'."
+}
+```
+
+
+Applications should log messages like `"can not open file.txt"` instead of
+`"can not open file"`, which forces the user to look at configuration or
+additional fields in the log message. This is amplified by the fact that
+ecslog backends can suppress the generation of the context when logging. The
+text backend without context capturing will just print:
+
+```
+2019-01-05T20:30:25+01:00 ERROR main.go:79 can not open file.txt
+```
+
+Standardized fields can also be passed to a format string via:
+
+```
+  log.Errorf("Failed to access %v", ecs.File.Path("test.txt"))
+```
+
+or:
+
+```
+  log.Errorf("Failed to access '%{file}'", ecs.File.Path("test.txt"))
+```
+
+Both calls produce the document:
+
+```
+{
+  ...
+  "file": {
+    "path": "test.txt"
+  },
+  "log": {
+    ...
+  },
+  "message": "Failed to access 'test.txt'"
+}
+```
+
+### Errors
+
+Error values serve multiple purposes. Error values are not only used to signal
+an error to the caller, but also give the programmer a chance to act on errors
+by interrogating the error value. Eventually an error value is logged as well
+for troubleshooting. In the presence of structured logging an error value
+should support:
+- Examining the value from source code.
+- Creating a self-explanatory, human-readable message.
+- Carrying additional context for automated processes consuming logs with errors (e.g. alerting).
+- Serializing/printing/examining the causes of the error as well.
+
+Error values tend to be passed bottom-up from the root cause(s), until they
+are eventually logged. To understand the root cause and the actual context in
+which the error was produced, it is a good idea to annotate errors with
+additional context while bubbling up.
+
+By properly annotating/wrapping errors we end up with a call-trace. ecslog
+assumes the trace to be a tree, so that it can also capture and represent
+multi-error values. The root cause(s) are the leaf node(s) in the tree.
+
+Packages often used for wrapping/annotating errors are:
+- github.com/hashicorp/errwrap
+- github.com/pkg/errors
+- github.com/hashicorp/go-multierror
+- go.uber.org/multierr
+- github.com/joeshaw/multierror
+
+The difficulty with the many error packages is consistent handling and logging
+of errors; for example, they provide different means of accessing an error's
+cause. The error interface only mandates that an error implement
+`Error() string`. Some packages also implement the `fmt.Formatter` interface,
+so that the full trace is only printed if the format string `'%+v'` is used.
+This easily leads to confusion on how to log an error, potentially not logging
+the actual root cause.
+
+To get some consistency when dealing with error values, `ecslog/errx` provides
+utility functions for wrapping, annotating, and examining error values.
+
+Functions for iterating all errors in an error tree are `Iter`, `Walk`, and
+`WalkEach`.
+
+For manually walking an error tree one can use `NumCauses` and `Cause`, as
+sketched below.
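+
+A minimal depth-first walk can be built on these two accessors alone (a
+sketch; shown with the `sderr` package vendored in this patch, assuming it
+exposes the package-level `NumCauses`, `Cause`, and `Wrap` helpers described
+here for `errx`):
+
+```
+package main
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/urso/sderr"
+)
+
+// walk visits err and all of its transitive causes depth-first.
+func walk(err error, fn func(error)) {
+	if err == nil {
+		return
+	}
+	fn(err)
+	for i := 0; i < sderr.NumCauses(err); i++ {
+		walk(sderr.Cause(err, i), fn)
+	}
+}
+
+func main() {
+	root := errors.New("file is corrupted")
+	err := sderr.Wrap(root, "failed to read %{file}", "file.txt")
+
+	// Prints the wrapping error first, then the root cause.
+	walk(err, func(e error) { fmt.Println(e.Error()) })
+}
+```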
+
+The errx package can examine the error trees of error types implementing
+`Cause() error`, `WrapperErrors() []error`, and `NumCauses() int, Cause(i int) error`.
+This makes it compatible with a number of custom error packages, but not all.
+
+`errx` also provides `ContainsX/FindX/CollectX` functions. These support
+custom predicates, types, or sentinel error values.
+
+We can also use `errx` to wrap errors via `Errf`, `Wrap`, and `WrapAll`. All
+these functions support Capturing Format strings, to add additional context
+for logging. The location in the source code will also be captured when using
+the error constructors/wrappers.
+
+For example:
+```
+  errx.Wrap(io.ErrUnexpectedEOF, "failed to read %{file}", "file.txt")
+->
+  {
+    "at": {
+      "file": ".../main.go",
+      "line": 128
+    },
+    "cause": {
+      "message": "unexpected EOF"
+    },
+    "ctx": {
+      "fields": {
+        "file": "file.txt"
+      }
+    },
+    "message": "failed to read file.txt: unexpected EOF"
+  }
+```
+
+We can add some additional context for logging via `errx.With`:
+
+```
+  errx.With(
+      ecs.HTTP.Request.Method("GET"),
+      ecs.URL.Path("/get_file/file.txt"),
+  ).Wrap(io.ErrUnexpectedEOF, "failed to read %{file}", "file.txt")
+->
+  {
+    "at": {
+      "file": ".../main.go",
+      "line": 46
+    },
+    "cause": {
+      "message": "unexpected EOF"
+    },
+    "ctx": {
+      "fields": {
+        "file": "file.txt"
+      },
+      "http": {
+        "request": {
+          "method": "GET"
+        }
+      },
+      "url": {
+        "path": "/get_file/file.txt"
+      }
+    },
+    "message": "failed to read file.txt: unexpected EOF"
+  }
+```
+
+The logger backends rely on `errx` for examining and serializing errors in a
+consistent way (best effort).
+
+When serializing errors, the combined context is added to the `ctx` field.
+The 'local' error message (as reported via `Error() string`) is added to the
+`message` field.
+
+The location will be added if the error value implements `At() (string, int)`.
+Multi-cause errors will add an array with each error value to the `causes`
+field.
+
+
+Example:
+```
+  serviceLog := log.With(
+      ecs.Service.Name("my server"),
+      ecs.Host.Hostname("localhost"),
+  )
+
+  ...
+
+  handlerLog := serviceLog.With(
+      ecs.HTTP.Request.Method("GET"),
+      ecs.URL.Path("/get_file/file.txt"),
+      ecs.Source.Domain("localhost"),
+      ecs.Source.IP("127.0.0.1"),
+  )
+
+  ...
+
+  file := "file.txt"
+
+  err := errx.With(
+      ecs.File.Path(file),
+      ecs.File.Extension("txt"),
+      ecs.File.Owner("me"),
+  ).Wrap(io.ErrUnexpectedEOF, "failed to read file")
+
+  ...
+
+  handlerLog.Errorf("Failed to serve %v: %v", ecs.File.Path(file), err)
+```
+
+JSON log message:
+
+```
+{
+  "@timestamp": "2019-01-05T20:16:04.865708+01:00",
+  "error": {
+    "at": {
+      "file": ".../main.go",
+      "line": 46
+    },
+    "cause": {
+      "message": "unexpected EOF"
+    },
+    "ctx": {
+      "file": {
+        "extension": "txt",
+        "owner": "me",
+        "path": "file.txt"
+      }
+    },
+    "message": "failed to read file: unexpected EOF"
+  },
+  "fields": {
+    "custom": "value",
+    "nested": {
+      "custom": "another value"
+    }
+  },
+  "file": {
+    "path": "file.txt"
+  },
+  "host": {
+    "hostname": "localhost"
+  },
+  "http": {
+    "request": {
+      "method": "GET"
+    }
+  },
+  "log": {
+    "file": {
+      "basename": "main.go",
+      "line": 154,
+      "path": ".../ecslog/cmd/tstlog/main.go"
+    },
+    "level": "error"
+  },
+  "message": "Failed to serve file.txt: failed to read file: unexpected EOF",
+  "service": {
+    "name": "my server"
+  },
+  "source": {
+    "domain": "localhost",
+    "ip": "127.0.0.1"
+  },
+  "url": {
+    "path": "/get_file/file.txt"
+  }
+}
+```
+
+
+## Use genfields
+
+The genfields script (found in cmd/genfields) should be used to convert an
+ECS-compatible schema definition to type-safe field constructors that can be
+used in Go code.
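+
+The generated constructors are ordinary functions returning `diag.Field`
+values. Conceptually they look like the hand-written sketch below (an
+illustration, not the generated code; the field and value types are those of
+the vendored `diag` package):
+
+```
+package ecs
+
+import "github.com/urso/diag"
+
+// Message is a sketch of a generated constructor for the ECS `message`
+// field. Standardized fields end up at the root of the emitted document
+// instead of under `fields.`.
+func Message(value string) diag.Field {
+	return diag.Field{Key: "message", Standardized: true, Value: diag.ValAny(value)}
+}
+```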
+
+The fld/ecs/0gen.go source file uses genfields to create the ECS field
+constructors via go generate.
+
+genfields parses the schema definition from a directory containing the
+schema yaml files:
+
+```
+genfields -out schema.go -fmt -schema
+```
+
diff --git a/vendor/github.com/urso/ecslog/backend/appender/file.go b/vendor/github.com/urso/ecslog/backend/appender/file.go
new file mode 100644
index 00000000000..c8d0ded6568
--- /dev/null
+++ b/vendor/github.com/urso/ecslog/backend/appender/file.go
@@ -0,0 +1,84 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+
+package appender
+
+import (
+	"bufio"
+	"io"
+	"os"
+	"sync"
+
+	"github.com/urso/ecslog/backend"
+	"github.com/urso/ecslog/backend/layout"
+)
+
+type file struct {
+	f          *os.File
+	buf        *bufio.Writer
+	lvl        backend.Level
+	mu         sync.Mutex
+	layout     layout.Layout
+	forceFlush bool
+}
+
+func File(
+	lvl backend.Level,
+	path string,
+	perm os.FileMode,
+	layout layout.Factory,
+	bufferSize int,
+	immediateFlush bool,
+) (backend.Backend, error) {
+	f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf *bufio.Writer
+	var out io.Writer = f
+	if bufferSize >= 0 {
+		buf = bufio.NewWriterSize(f, bufferSize)
+		out = buf
+	}
+
+	l, err := layout(out)
+	if err != nil {
+		return nil, err
+	}
+
+	return &file{
+		f:          f,
+		lvl:        lvl,
+		buf:        buf,
+		layout:     l,
+		forceFlush: buf != nil && immediateFlush,
+	}, nil
+}
+
+func (f *file) For(name string) backend.Backend {
+	return f
+}
+
+func (f *file) IsEnabled(lvl backend.Level) bool {
+	return lvl >= f.lvl
+}
+
+func (f *file) UseContext() bool {
+	return f.layout.UseContext()
+}
+
+func (f *file) Log(msg backend.Message) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	f.layout.Log(msg)
+	if !f.forceFlush {
+		return
+	}
+
+	f.buf.Flush()
+}
diff --git a/vendor/github.com/urso/ecslog/backend/appender/writer.go b/vendor/github.com/urso/ecslog/backend/appender/writer.go
new file mode 100644
index 00000000000..13eea2a1c26
--- /dev/null
+++ b/vendor/github.com/urso/ecslog/backend/appender/writer.go
@@ -0,0 +1,76 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package appender + +import ( + "io" + "os" + "sync" + + "github.com/urso/ecslog/backend" + "github.com/urso/ecslog/backend/layout" +) + +type writer struct { + mu sync.Mutex + out io.Writer + lvl backend.Level + layout layout.Layout + forceFlush bool +} + +func NewWriter(out io.Writer, lvl backend.Level, layout layout.Factory, forceFlush bool) (backend.Backend, error) { + l, err := layout(out) + if err != nil { + return nil, err + } + + return &writer{ + out: out, + lvl: lvl, + layout: l, + forceFlush: forceFlush, + }, nil +} + +func Console(lvl backend.Level, layout layout.Factory) (backend.Backend, error) { + return NewWriter(os.Stderr, lvl, layout, true) +} + +func (w *writer) For(name string) backend.Backend { + return w +} + +func (w *writer) IsEnabled(lvl backend.Level) bool { + return lvl >= w.lvl +} + +func (w *writer) UseContext() bool { + return w.layout.UseContext() +} + +func (w *writer) Log(msg backend.Message) { + w.mu.Lock() + defer w.mu.Unlock() + + w.layout.Log(msg) + if !w.forceFlush { + return + } + + // flush if output is buffered + switch f := w.out.(type) { + case interface{ Flush() error }: + f.Flush() + + case interface{ Flush() bool }: + f.Flush() + + case interface{ Flush() }: + f.Flush() + } +} diff --git a/vendor/github.com/urso/ecslog/backend/backend.go b/vendor/github.com/urso/ecslog/backend/backend.go new file mode 100644 index 00000000000..6b441e8f413 --- /dev/null +++ b/vendor/github.com/urso/ecslog/backend/backend.go @@ -0,0 +1,51 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package backend + +import "github.com/urso/diag" + +type Backend interface { + For(name string) Backend + + IsEnabled(lvl Level) bool + UseContext() bool + + Log(Message) +} + +type Level uint8 + +type Message struct { + Name string + Level Level + Caller Caller + Message string + Context *diag.Context + Causes []error +} + +const ( + Trace Level = iota + Debug + Info + Error +) + +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Error: + return "error" + default: + return "unknown" + } +} diff --git a/vendor/github.com/urso/ecslog/backend/caller.go b/vendor/github.com/urso/ecslog/backend/caller.go new file mode 100644 index 00000000000..c534ea837da --- /dev/null +++ b/vendor/github.com/urso/ecslog/backend/caller.go @@ -0,0 +1,56 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package backend + +import "runtime" + +type Caller struct { + PC uintptr + file string + function string + line int +} + +func GetCaller(skip int) Caller { + var tmp [1]uintptr + runtime.Callers(skip+2, tmp[:]) + return Caller{PC: tmp[0]} +} + +func (c *Caller) File() string { + if c.PC == 0 || c.file != "" { + return c.file + } + c.load() + return c.file +} + +func (c *Caller) Function() string { + if c.PC == 0 || c.function != "" { + return c.function + } + c.load() + return c.function +} + +func (c *Caller) Line() int { + if c.PC == 0 || c.file != "" { + return c.line + } + c.load() + return c.line +} + +func (c *Caller) load() { + fn := runtime.FuncForPC(c.PC - 1) + if fn != nil { + f, l := fn.FileLine(c.PC - 1) + c.file = f + c.line = l + c.function = fn.Name() + } +} diff --git a/vendor/github.com/urso/ecslog/backend/layout/dyn.go b/vendor/github.com/urso/ecslog/backend/layout/dyn.go new file mode 100644 index 00000000000..a29448f0f8e --- /dev/null +++ b/vendor/github.com/urso/ecslog/backend/layout/dyn.go @@ -0,0 +1,31 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package layout + +import ( + "time" + + "github.com/urso/diag" +) + +func DynTimestamp(layout string) diag.Field { + return diag.Field{Key: "@timestamp", Standardized: true, Value: diag.Value{ + String: layout, Reporter: _tsReporter, + }} +} + +type tsReporter struct{} + +var _tsReporter = tsReporter{} + +func (tsReporter) Type() diag.Type { + return diag.StringType +} + +func (tsReporter) Ifc(v *diag.Value, fn func(interface{})) { + fn(time.Now().Format(v.String)) +} diff --git a/vendor/github.com/urso/ecslog/backend/layout/layout.go b/vendor/github.com/urso/ecslog/backend/layout/layout.go new file mode 100644 index 00000000000..952462e89c0 --- /dev/null +++ b/vendor/github.com/urso/ecslog/backend/layout/layout.go @@ -0,0 +1,20 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package layout + +import ( + "io" + + "github.com/urso/ecslog/backend" +) + +type Factory func(io.Writer) (Layout, error) + +type Layout interface { + UseContext() bool + Log(msg backend.Message) +} diff --git a/vendor/github.com/urso/ecslog/backend/layout/plain.go b/vendor/github.com/urso/ecslog/backend/layout/plain.go new file mode 100644 index 00000000000..d598df143bc --- /dev/null +++ b/vendor/github.com/urso/ecslog/backend/layout/plain.go @@ -0,0 +1,214 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package layout + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "time" + + "github.com/urso/diag" + "github.com/urso/sderr" + + "github.com/urso/ecslog/backend" +) + +type textLayout struct { + out io.Writer + buf bytes.Buffer + withCtx bool +} + +type textCtxPrinter struct { + buf *bytes.Buffer + n int +} + +// maximum logger buffer size to keep in between calls +const persistentTextBufferSize = 512 + +func Text(withCtx bool) Factory { + return func(out io.Writer) (Layout, error) { + return &textLayout{ + out: out, + withCtx: withCtx, + }, nil + } +} + +func (l *textLayout) UseContext() bool { + return l.withCtx +} + +func (l *textLayout) Log(msg backend.Message) { + defer func() { + if l.buf.Len()+l.buf.Cap() > persistentTextBufferSize { + l.buf = bytes.Buffer{} + } else { + l.buf.Reset() + } + }() + + ts := time.Now() + + l.buf.WriteString(ts.Format(time.RFC3339)) + l.buf.WriteByte(' ') + l.buf.WriteString(l.level(msg.Level)) + l.buf.WriteByte('\t') + if msg.Name != "" { + fmt.Fprintf(&l.buf, "'%v' - ", msg.Name) + } + + caller := msg.Caller + fmt.Fprintf(&l.buf, "%v:%d", filepath.Base(caller.File()), caller.Line()) + l.buf.WriteByte('\t') + l.buf.WriteString(msg.Message) + + msg.Context.VisitKeyValues(&textCtxPrinter{buf: &l.buf}) + l.buf.WriteRune('\n') + + // write errors + switch len(msg.Causes) { + case 0: + // do nothing + + case 1: + if ioErr := l.OnErrorValue(msg.Causes[0], "\t"); ioErr != nil { + return + } + + case 2: + written := 0 + l.buf.WriteString("\tcaused by:\n") + for _, err := range msg.Causes { + if err == nil { + continue + } + + if written != 0 { + l.buf.WriteString("\tand\n") + } + + written++ + if ioErr := l.OnErrorValue(err, "\t "); ioErr != nil { + return + } + } + } + + l.out.Write(l.buf.Bytes()) +} + +func (l *textLayout) OnErrorValue(err error, indent string) error { + l.buf.WriteString(indent) + + if file, line := sderr.At(err); file != "" { + fmt.Fprintf(&l.buf, "%v:%v\t", filepath.Base(file), line) + } + + l.buf.WriteString(err.Error()) + + if l.withCtx { + if ctx := sderr.Context(err); ctx.Len() > 0 { + ctx.VisitKeyValues(&textCtxPrinter{buf: &l.buf}) + } + } + + if _, ioErr := l.buf.WriteRune('\n'); ioErr != nil { + return ioErr + } + + n := sderr.NumCauses(err) + switch n { + case 0: + // do nothing + case 1: + cause := sderr.Unwrap(err) + if cause != nil { + return l.OnErrorValue(cause, indent) + } + default: + causeIndent := indent + " " + written := 0 + fmt.Fprintf(&l.buf, "%vmulti-error caused by:\n", indent) + for i := 0; i < n; i++ { + cause := sderr.Cause(err, i) + if cause != nil { + if written != 0 { + fmt.Fprintf(&l.buf, "%vand\n", indent) + } + + written++ + if err := l.OnErrorValue(cause, causeIndent); err != nil { + return err + } + } + } + } + + return nil +} + +func (_ *textLayout) level(lvl backend.Level) string { + switch lvl { + case backend.Trace: + return "TRACE" + case backend.Debug: + return "DEBUG" + case backend.Info: + return "INFO" + case backend.Error: + return "ERROR" + default: + return fmt.Sprintf("<%v>", lvl) + } +} + +func (p *textCtxPrinter) OnObjStart(key string) error { + if err := p.onKey(key); err != nil { + return err + } + _, err := p.buf.WriteRune('{') + return err +} + +func (p *textCtxPrinter) OnObjEnd() error { + _, err := p.buf.WriteRune('}') + return err +} + +func (p *textCtxPrinter) OnValue(key string, v diag.Value) (err error) { + p.onKey(key) + v.Reporter.Ifc(&v, func(value interface{}) { + switch v 
:= value.(type) { + case *diag.Context: + p.buf.WriteRune('{') + err = v.VisitKeyValues(p) + p.buf.WriteRune('}') + case string, []byte: + fmt.Fprintf(p.buf, "%q", v) + default: + fmt.Fprintf(p.buf, "%v", v) + } + }) + + return err +} + +func (p *textCtxPrinter) onKey(key string) error { + if p.n > 0 { + p.buf.WriteRune(' ') + } else { + p.buf.WriteString("\t| ") + } + p.buf.WriteString(key) + p.buf.WriteRune('=') + p.n++ + return nil +} diff --git a/vendor/github.com/urso/ecslog/backend/layout/structured.go b/vendor/github.com/urso/ecslog/backend/layout/structured.go new file mode 100644 index 00000000000..2b9a4bed57e --- /dev/null +++ b/vendor/github.com/urso/ecslog/backend/layout/structured.go @@ -0,0 +1,385 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package layout + +import ( + "bytes" + "io" + + "github.com/urso/diag" + "github.com/urso/diag-ecs/ecs" + "github.com/urso/ecslog/backend" + "github.com/urso/sderr" + + structform "github.com/elastic/go-structform" + "github.com/elastic/go-structform/cborl" + "github.com/elastic/go-structform/gotype" + "github.com/elastic/go-structform/json" + "github.com/elastic/go-structform/ubjson" +) + +type structLayout struct { + out io.Writer + buf bytes.Buffer + fields *diag.Context + makeEncoder func(io.Writer) structform.Visitor + types *gotype.Iterator + typeOpts []gotype.FoldOption + visitor structform.Visitor +} + +type structVisitor structLayout + +// errorVal is used to wrap errors, so to notify encoding callback that +// we're dealing with special error value who's context doesn't need to be +// reported. +type errorVal struct { + err error +} + +// multiErrOf is used to wrap a multierror, so to notify the encoding +// callback that we're dealing with a special error value. +// Each error in the multierror must be deal separately, creating and reporting +// it's local context. +type multiErrOf struct { + err error +} + +type multiErr struct { + errs []error +} + +func JSON(fields []diag.Field, opts ...gotype.FoldOption) Factory { + return Structured(func(w io.Writer) structform.Visitor { + return json.NewVisitor(w) + }, fields, opts...) +} + +func UBJSON(fields []diag.Field, opts ...gotype.FoldOption) Factory { + return Structured(func(w io.Writer) structform.Visitor { + return ubjson.NewVisitor(w) + }, fields, opts...) +} + +func CBOR(fields []diag.Field, opts ...gotype.FoldOption) Factory { + return Structured(func(w io.Writer) structform.Visitor { + return cborl.NewVisitor(w) + }, fields, opts...) +} + +func Structured( + makeEncoder func(io.Writer) structform.Visitor, + fields []diag.Field, + opts ...gotype.FoldOption, +) Factory { + return func(out io.Writer) (Layout, error) { + logCtx := diag.NewContext(nil, nil) + logCtx.AddFields(fields...) + + l := &structLayout{ + out: out, + fields: logCtx, + makeEncoder: makeEncoder, + typeOpts: opts, + } + l.reset() + return l, nil + } +} + +func (l *structLayout) reset() { + l.buf.Reset() + visitor := l.makeEncoder(&l.buf) + l.types, _ = gotype.NewIterator(visitor, l.typeOpts...) 
+ l.visitor = visitor +} + +func (l *structLayout) UseContext() bool { return true } + +func (l *structLayout) Log(msg backend.Message) { + var userCtx, stdCtx *diag.Context + + if msg.Context.Len() > 0 { + userCtx = msg.Context.User() + stdCtx = msg.Context.Standardized() + } + + file := msg.Caller.File() + + ctx := diag.NewContext(stdCtx, nil) + ctx.AddFields([]diag.Field{ + ecs.Log.Level(msg.Level.String()), + + ecs.Log.Origin.File.Name(file), + ecs.Log.Origin.File.Line(msg.Caller.Line()), + + ecs.Message(msg.Message), + }...) + if msg.Name != "" { + ctx.AddField(ecs.Log.Logger(msg.Name)) + } + + if userCtx.Len() > 0 { + ctx.AddField(diag.Any("fields", &userCtx)) + } + + // Add error values to the context. So to guarantee an error value is not + // missed we use fully qualified names here. + switch len(msg.Causes) { + case 0: + break + case 1: + cause := msg.Causes[0] + if errCtx := buildErrCtx(cause); errCtx.Len() > 0 { + ctx.AddField(diag.Any("error.ctx", &errCtx)) + } + ctx.AddField(diag.String("error.message", cause.Error())) + + if file, line := sderr.At(cause); file != "" { + ctx.AddField(diag.String("error.at.file", file)) + ctx.AddField(diag.Int("error.at.line", line)) + } + + n := sderr.NumCauses(cause) + switch n { + case 0: + // nothing + case 1: + ctx.AddField(diag.Any("error.cause", errorVal{sderr.Unwrap(cause)})) + + default: + ctx.AddField(diag.Any("error.causes", multiErrOf{cause})) + } + + default: + ctx.AddField(diag.Any("error.causes", multiErr{msg.Causes})) + } + + // link predefined fields + if l.fields.Len() > 0 { + ctx = diag.NewContext(l.fields, ctx) + } + + v := (*structVisitor)(l) + if err := v.Process(ctx); err != nil { + l.reset() + } else { + l.out.Write(l.buf.Bytes()) + l.buf.Reset() + } +} + +func (v *structVisitor) Process(ctx *diag.Context) error { + if err := v.Begin(); err != nil { + return err + } + if err := ctx.VisitStructured(v); err != nil { + return err + } + return v.End() +} + +func (v *structVisitor) Begin() error { return v.visitor.OnObjectStart(-1, structform.AnyType) } +func (v *structVisitor) End() error { return v.visitor.OnObjectFinished() } + +func (v structVisitor) OnObjStart(key string) error { + if err := v.visitor.OnKey(key); err != nil { + return err + } + return v.visitor.OnObjectStart(-1, structform.AnyType) +} + +func (v structVisitor) OnObjEnd() error { + return v.visitor.OnObjectFinished() +} + +func (v structVisitor) OnValue(key string, val diag.Value) error { + var err error + + if err = v.visitor.OnKey(key); err != nil { + return err + } + + val.Reporter.Ifc(&val, func(ifc interface{}) { + switch val := ifc.(type) { + case *diag.Context: + if err = v.Begin(); err != nil { + return + } + if err = val.VisitStructured(v); err != nil { + return + } + err = v.End() + + case errorVal: // error cause + err = v.OnErrorValue(val.err, false) + + case multiErrOf: + err = v.OnMultiErrValueIter(val.err) + + case multiErr: + err = v.OnMultiErr(val.errs) + + default: + err = v.types.Fold(ifc) + } + }) + + return err +} + +func (v structVisitor) OnErrorValue(err error, withCtx bool) error { + if err := v.Begin(); err != nil { + return err + } + + if file, line := sderr.At(err); file != "" { + if err := v.visitor.OnKey("at"); err != nil { + return err + } + if err := v.Begin(); err != nil { + return err + } + if err := v.visitor.OnKey("file"); err != nil { + return err + } + if err := v.visitor.OnString(file); err != nil { + return err + } + if err := v.visitor.OnKey("line"); err != nil { + return err + } + if err := v.visitor.OnInt(line); 
err != nil { + return err + } + if err := v.End(); err != nil { + return err + } + } + + if withCtx { + ctx := buildErrCtx(err) + if ctx.Len() > 0 { + if err := v.visitor.OnKey("ctx"); err != nil { + return err + } + if err := v.Begin(); err != nil { + return err + } + if err := ctx.VisitStructured(v); err != nil { + return err + } + if err := v.End(); err != nil { + return err + } + } + } + + n := sderr.NumCauses(err) + switch n { + case 0: + // nothing to do + + case 1: + // add cause + cause := sderr.Cause(err, 0) + if cause != nil { + if err := v.OnValue("cause", diag.ValAny(errorVal{cause})); err != nil { + return err + } + } + + default: + if err := v.OnValue("causes", diag.ValAny(multiErrOf{err})); err != nil { + return err + } + + } + + if err := v.visitor.OnKey("message"); err != nil { + return err + } + if err := v.visitor.OnString(err.Error()); err != nil { + return err + } + + return v.End() +} + +func (v structVisitor) OnMultiErrValueIter(parent error) error { + if err := v.visitor.OnArrayStart(-1, structform.AnyType); err != nil { + return err + } + + n := sderr.NumCauses(parent) + for i := 0; i < n; i++ { + cause := sderr.Cause(parent, i) + if cause != nil { + if err := v.OnErrorValue(cause, true); err != nil { + return err + } + } + } + + return v.visitor.OnArrayFinished() +} + +func (v structVisitor) OnMultiErr(errs []error) error { + if err := v.visitor.OnArrayStart(-1, structform.AnyType); err != nil { + return err + } + + for _, err := range errs { + if err != nil { + if err := v.OnErrorValue(err, true); err != nil { + return err + } + } + } + + return v.visitor.OnArrayFinished() +} + +func buildErrCtx(err error) (errCtx *diag.Context) { + var linkedCtx *diag.Context + + causeCtx := sderr.Context(err) + if causeCtx.Len() > 0 { + linkedCtx = linkLinearErrCtx(causeCtx, err) + } else { + linkedCtx = linkLinearErrCtx(linkedCtx, err) + } + + stdCtx := linkedCtx.Standardized() + errCtx = diag.NewContext(stdCtx, nil) + + if userCtx := linkedCtx.User(); userCtx.Len() > 0 { + errCtx.AddField(diag.Any("fields", &userCtx)) + } + + return errCtx +} + +// linkLinearErrCtx links all error context in a linear chain. Stops if a +// multierror is discovered. 
+func linkLinearErrCtx(ctx *diag.Context, err error) *diag.Context { + for err != nil { + n := sderr.NumCauses(err) + if n != 1 { + return ctx + } + + cause := sderr.Unwrap(err) + causeCtx := sderr.Context(cause) + if causeCtx.Len() > 0 { + ctx = diag.NewContext(ctx, causeCtx) + } + + err = cause + } + return ctx +} diff --git a/vendor/github.com/urso/ecslog/go.mod b/vendor/github.com/urso/ecslog/go.mod new file mode 100644 index 00000000000..01440a05988 --- /dev/null +++ b/vendor/github.com/urso/ecslog/go.mod @@ -0,0 +1,12 @@ +module github.com/urso/ecslog + +go 1.14 + +require ( + github.com/elastic/go-structform v0.0.6 + github.com/stretchr/testify v1.4.0 // indirect + github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 + github.com/urso/diag-ecs v0.0.0-20200210114345-ab085841dcb9 + github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect +) diff --git a/vendor/github.com/urso/ecslog/go.sum b/vendor/github.com/urso/ecslog/go.sum new file mode 100644 index 00000000000..16bdba2df1e --- /dev/null +++ b/vendor/github.com/urso/ecslog/go.sum @@ -0,0 +1,101 @@ +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/go-structform v0.0.6 h1:wqeK4LwD2NNDOoRGTImE24S6pkCDVr8+oUSIkmChzLk= +github.com/elastic/go-structform v0.0.6/go.mod h1:QrMyP3oM9Sjk92EVGLgRaL2lKt0Qx7ZNDRWDxB6khVs= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/urso/diag v0.0.0-20200125202105-ffdc32ff5518/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/diag v0.0.0-20200209092937-2103d7fb4c5a h1:qIBEAlqzmD0hd2iNZdx3l/DB3dR8KXgckdO1cuq15aU= +github.com/urso/diag v0.0.0-20200209092937-2103d7fb4c5a/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/diag-ecs v0.0.0-20200210114345-ab085841dcb9 h1:GzsakegdLNhw0mF2fcFd+BgdY8owV+4Y+6MvbgRXIWg= +github.com/urso/diag-ecs v0.0.0-20200210114345-ab085841dcb9/go.mod h1:+1Ug5A104KCMD8ZZ4YarKGNSt8ANJWA7nWqji37BmrQ= +github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a h1:jWAaRFnay3H2e6S0GGCl5nKrkgQNlarCE/kvcutzBmw= +github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a/go.mod h1:DbaJnRzkGaWrMWm5Hz6QVnUj//x9/zjrfx8bF3J+GJY= +github.com/urso/sderr v0.0.0-20200210115749-5d311cd31803 h1:RYGgCUSEAtZIm9sBXag/juPsicMrUHM9iH6UUDwEmOc= +github.com/urso/sderr v0.0.0-20200210115749-5d311cd31803/go.mod h1:Mp7g3iltlbxKabpZGPpmgnk5gYJIdqTW0jUaPLIoTTU= +github.com/urso/sderr v0.0.0-20200210123314-7bfa1f17eeb6 h1:uDmhlHE0cdQqCgUzvq77jz6g+BsqbhJwcKiZ0sXckBg= +github.com/urso/sderr v0.0.0-20200210123314-7bfa1f17eeb6/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec 
h1:HkZIDJrMKZHPsYhmH2XjTTSk1pbMCFfpxSnyzZUFm+k= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/urso/ecslog/log.go b/vendor/github.com/urso/ecslog/log.go new file mode 100644 index 00000000000..2a26bedf451 --- /dev/null +++ b/vendor/github.com/urso/ecslog/log.go @@ -0,0 +1,259 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package ecslog + +import ( + "context" + "fmt" + "strconv" + + "github.com/urso/diag" + "github.com/urso/diag/ctxfmt" + "github.com/urso/ecslog/backend" +) + +type Logger struct { + ctx *diag.Context + name string + backend backend.Backend +} + +type Level = backend.Level + +const ( + Trace Level = backend.Trace + Debug Level = backend.Debug + Info Level = backend.Info + Error Level = backend.Error +) + +func New(backend backend.Backend) *Logger { + return &Logger{ + ctx: diag.NewContext(nil, nil), + name: "", + backend: backend, + } +} + +func (l *Logger) IsEnabled(lvl Level) bool { + return l.backend.IsEnabled(lvl) +} + +func (l *Logger) Named(name string) *Logger { + return &Logger{ + ctx: diag.NewContext(l.ctx, nil), + backend: l.backend.For(name), + name: name, + } +} + +func (l *Logger) With(args ...interface{}) *Logger { + nl := &Logger{ + ctx: diag.NewContext(l.ctx, nil), + backend: l.backend, + } + nl.ctx.AddAll(args...) + return nl +} + +func (l *Logger) WithFields(fields ...diag.Field) *Logger { + nl := &Logger{ + ctx: diag.NewContext(l.ctx, nil), + backend: l.backend, + } + nl.ctx.AddFields(fields...) 
+ return nl +} + +func (l *Logger) WithDiagnosticContext(ctx *diag.Context) *Logger { + if ctx.Len() == 0 { + return l.With() + } + + var merged *diag.Context + if l.ctx.Len() == 0 { + merged = ctx + } else { + merged = diag.NewContext(l.ctx, ctx) + } + return &Logger{ + ctx: diag.NewContext(merged, nil), + backend: l.backend, + } +} + +func (l *Logger) WithDiagnotics(ctx context.Context) *Logger { + dc, _ := diag.DiagnosticsFrom(ctx) + if dc.Len() == 0 { + return l.With() + } + return l.WithDiagnosticContext(dc) +} + +func (l *Logger) Trace(args ...interface{}) { l.log(Trace, 1, args) } +func (l *Logger) Tracef(msg string, args ...interface{}) { l.logf(Trace, 1, msg, args) } + +func (l *Logger) Debug(args ...interface{}) { l.log(Debug, 1, args) } +func (l *Logger) Debugf(msg string, args ...interface{}) { l.logf(Debug, 1, msg, args) } + +func (l *Logger) Info(args ...interface{}) { l.log(Info, 1, args) } +func (l *Logger) Infof(msg string, args ...interface{}) { l.logf(Info, 1, msg, args) } + +func (l *Logger) Error(args ...interface{}) { l.log(Error, 1, args) } +func (l *Logger) Errorf(msg string, args ...interface{}) { l.logf(Error, 1, msg, args) } + +func (l *Logger) log(lvl Level, skip int, args []interface{}) { + if !l.IsEnabled(lvl) { + return + } + + if l.backend.UseContext() { + l.logArgsCtx(lvl, skip+1, args) + } else { + l.logArgs(lvl, skip+1, args) + } +} + +func (l *Logger) logf(lvl Level, skip int, msg string, args []interface{}) { + if !l.IsEnabled(lvl) { + return + } + + if l.backend.UseContext() { + l.logfMsgCtx(lvl, skip+1, msg, args) + } else { + l.logfMsg(lvl, skip+1, msg, args) + } +} + +func (l *Logger) logArgsCtx(lvl Level, skip int, args []interface{}) { + msg := argsMessage(args) + ctx := diag.NewContext(l.ctx, nil) + + var causes []error + for _, arg := range args { + switch v := arg.(type) { + case diag.Field: + ctx.AddField(v) + case error: + causes = append(causes, v) + } + } + + l.backend.Log(backend.Message{ + Name: l.name, + Level: lvl, + Caller: getCaller(skip + 1), + Message: msg, + Context: ctx, + Causes: causes, + }) +} + +func (l *Logger) logArgs(lvl Level, skip int, args []interface{}) { + msg := argsMessage(args) + + var causes []error + for _, arg := range args { + if err, ok := arg.(error); ok { + causes = append(causes, err) + } + } + l.backend.Log(backend.Message{ + Name: l.name, + Level: lvl, + Caller: getCaller(skip + 1), + Message: msg, + Context: diag.NewContext(nil, nil), + Causes: causes, + }) +} + +func argsMessage(args []interface{}) string { + if len(args) == 0 { + return "" + } + if len(args) == 1 { + if str, ok := args[0].(string); ok { + return str + } + } + return fmt.Sprint(args...) +} + +func (l *Logger) logfMsgCtx(lvl Level, skip int, msg string, args []interface{}) { + ctx := diag.NewContext(l.ctx, nil) + var causes []error + msg, rest := ctxfmt.Sprintf(func(key string, idx int, val interface{}) { + if field, ok := (val).(diag.Field); ok { + if key != "" { + ctx.Add(fmt.Sprintf("%v.%v", key, field.Key), field.Value) + } else { + ctx.AddField(field) + } + return + } + + switch v := val.(type) { + case diag.Value: + ctx.Add(ensureKey(key, idx), v) + case error: + causes = append(causes, v) + if key != "" { + ctx.AddField(diag.String(key, v.Error())) + } + default: + ctx.AddField(diag.Any(ensureKey(key, idx), val)) + } + }, msg, args...) 
+ + if len(rest) > 0 { + msg = fmt.Sprintf("%s {EXTRA_FIELDS: %v}", msg, rest) + } + + l.backend.Log(backend.Message{ + Name: l.name, + Level: lvl, + Caller: getCaller(skip + 1), + Message: msg, + Context: ctx, + Causes: causes, + }) +} + +func (l *Logger) logfMsg(lvl Level, skip int, msg string, args []interface{}) { + var causes []error + msg, rest := ctxfmt.Sprintf(func(key string, idx int, val interface{}) { + if err, ok := val.(error); ok { + causes = append(causes, err) + } + }, msg, args...) + + if len(rest) > 0 { + msg = fmt.Sprintf("%s {EXTRA_FIELDS: %v}", msg, rest) + } + + l.backend.Log(backend.Message{ + Name: l.name, + Level: lvl, + Caller: getCaller(skip + 1), + Message: msg, + Context: diag.NewContext(nil, nil), + Causes: causes, + }) +} + +func ensureKey(key string, idx int) string { + if key == "" { + return strconv.FormatInt(int64(idx), 10) + } + return key +} + +func getCaller(skip int) backend.Caller { + return backend.GetCaller(skip + 1) +} diff --git a/vendor/github.com/urso/magetools/gotool/gotool.go b/vendor/github.com/urso/magetools/gotool/gotool.go index a93223ab742..aa6fb5fcec5 100644 --- a/vendor/github.com/urso/magetools/gotool/gotool.go +++ b/vendor/github.com/urso/magetools/gotool/gotool.go @@ -16,6 +16,7 @@ type Go struct { Build GoBuild List GoList + Run GoRun Test GoTest } @@ -27,6 +28,7 @@ func New(exec clitool.Executor, path string) *Go { g.Build = makeBuild(g) g.List = makeList(g) g.Test = makeTest(g) + g.Run = makeRun(g) return g } diff --git a/vendor/github.com/urso/magetools/gotool/run.go b/vendor/github.com/urso/magetools/gotool/run.go new file mode 100644 index 00000000000..a3f841560a0 --- /dev/null +++ b/vendor/github.com/urso/magetools/gotool/run.go @@ -0,0 +1,35 @@ +package gotool + +import ( + "context" + "os" + "strings" + + "github.com/urso/magetools/clitool" +) + +type GoRun func(context context.Context, opts ...clitool.ArgOpt) error + +type goRun struct { + g *Go +} + +func makeRun(g *Go) GoRun { + gr := &goRun{g} + return gr.Do +} + +func (gr *goRun) Do(context context.Context, opts ...clitool.ArgOpt) error { + return gr.g.ExecGo(context, []string{"run"}, clitool.CreateArgs(opts...), os.Stdout, os.Stderr) +} + +func (GoRun) Tags(tags ...string) clitool.ArgOpt { + return clitool.FlagIf("-tags", strings.Join(tags, " ")) +} +func (GoRun) Script(files ...string) clitool.ArgOpt { + return clitool.Positional(files...) +} +func (GoRun) ScriptArgs(opts ...clitool.ArgOpt) clitool.ArgOpt { + args := clitool.CreateArgs(opts...).Build() + return clitool.Positional(args...) +} diff --git a/vendor/github.com/urso/sderr/LICENSE b/vendor/github.com/urso/sderr/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/urso/sderr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
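The files below vendor github.com/urso/sderr, the structured error package used by the new agent code. As a minimal usage sketch of the API defined in these diffs (the call site, path, and message are illustrative only; Wrap, Is, and the %v/%+v report formats come from the vendored files that follow):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/urso/sderr"
    )

    func loadConfig(path string) error {
    	if _, err := os.Stat(path); err != nil {
    		// Wrap records the caller's file:line and keeps err as the cause.
    		return sderr.Wrap(err, "failed to load configuration from %v", path)
    	}
    	return nil
    }

    func main() {
    	if err := loadConfig("agent.yml"); err != nil {
    		fmt.Printf("%v\n", err)  // compact one-line report
    		fmt.Printf("%+v\n", err) // verbose report with location and causes
    		if sderr.Is(err, os.ErrNotExist) {
    			// Is, Find, and FindIf walk the whole cause tree, not just one level.
    			os.Exit(1)
    		}
    	}
    }

Because Wrap keeps the original error reachable as a cause, the query helpers defined in query.go below can match errors anywhere in the tree.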
diff --git a/vendor/github.com/urso/sderr/builder.go b/vendor/github.com/urso/sderr/builder.go new file mode 100644 index 00000000000..19ed82cfbc6 --- /dev/null +++ b/vendor/github.com/urso/sderr/builder.go @@ -0,0 +1,151 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package sderr + +import ( + "context" + "fmt" + + "github.com/urso/diag" + "github.com/urso/diag/ctxfmt" +) + +type Builder struct { + ctx *diag.Context + withStack bool +} + +func (b *Builder) With(fields ...interface{}) *Builder { + ctx := diag.NewContext(b.ctx, nil) + ctx.AddAll(fields...) + return &Builder{ctx: ctx, withStack: b.withStack} +} + +func (b *Builder) WithStack() *Builder { + var ctx *diag.Context + if b.ctx.Len() > 0 { + ctx = diag.NewContext(b.ctx, nil) + } + return &Builder{ctx: ctx, withStack: true} +} + +func (b *Builder) WithDiagnosticContext(ctx *diag.Context) *Builder { + merged := diag.NewContext(b.ctx, ctx) + return &Builder{ctx: diag.NewContext(merged, nil), withStack: b.withStack} +} + +func (b *Builder) WithDiagnotics(ctx context.Context) *Builder { + dc, _ := diag.DiagnosticsFrom(ctx) + return b.WithDiagnosticContext(dc) +} + +func (b *Builder) Errf(msg string, vs ...interface{}) error { + return b.doErrf(1, msg, vs) +} + +func (b *Builder) doErrf(skip int, msg string, vs []interface{}) error { + val, causes := b.makeErrValue(skip+1, msg, vs) + switch len(causes) { + case 0: + return &val + case 1: + return &wrappedErrValue{errValue: val, cause: causes[0]} + default: + return &multiErrValue{errValue: val, causes: causes} + } +} + +func (b *Builder) Wrap(cause error, msg string, vs ...interface{}) error { + return b.doWrap(1, cause, msg, vs) +} + +func (b *Builder) doWrap(skip int, cause error, msg string, vs []interface{}) error { + val, extra := b.makeErrValue(skip+1, msg, vs) + if len(extra) > 0 { + if cause != nil { + extra = append(extra, cause) + } + + if len(extra) == 1 { + return &wrappedErrValue{errValue: val, cause: extra[0]} + } + return &multiErrValue{errValue: val, causes: extra} + } + + if cause == nil { + return &val + } + + return &wrappedErrValue{errValue: val, cause: cause} +} + +func (b *Builder) WrapAll(causes []error, msg string, vs ...interface{}) error { + return b.doWrapAll(1, causes, msg, vs) +} + +func (b *Builder) doWrapAll(skip int, causes []error, msg string, vs []interface{}) error { + if len(causes) == 0 { + return nil + } + + val, extra := b.makeErrValue(skip+1, msg, vs) + if len(extra) > 0 { + causes = append(extra, causes...) + } + + return &multiErrValue{errValue: val, causes: causes} +} + +func (b *Builder) makeErrValue(skip int, msg string, vs []interface{}) (errValue, []error) { + var ctx *diag.Context + var causes []error + + errorMessage, _ := ctxfmt.Sprintf(func(key string, idx int, val interface{}) { + if ctx == nil { + ctx = diag.NewContext(b.ctx, nil) + } + + if field, ok := (val).(diag.Field); ok { + if key != "" { + ctx.Add(fmt.Sprintf("%v.%v", key, field.Key), field.Value) + } else { + ctx.AddField(field) + } + return + } + + switch v := val.(type) { + case diag.Value: + ctx.Add(ensureKey(key, idx), v) + case error: + causes = append(causes, v) + if key != "" { + ctx.AddField(diag.String(key, v.Error())) + } + default: + ctx.AddField(diag.Any(ensureKey(key, idx), val)) + } + + }, msg, vs...) 
+ + if ctx == nil { + ctx = b.ctx + } + + var stack StackTrace + if b.withStack { + stack = makeStackTrace(skip + 1) + } + return errValue{at: getCaller(skip + 1), msg: errorMessage, ctx: ctx, stack: stack}, causes +} + +func ensureKey(key string, idx int) string { + if key == "" { + return fmt.Sprintf("%v", idx) + } + return key +} diff --git a/vendor/github.com/urso/sderr/errors.go b/vendor/github.com/urso/sderr/errors.go new file mode 100644 index 00000000000..c6ef410ac3c --- /dev/null +++ b/vendor/github.com/urso/sderr/errors.go @@ -0,0 +1,252 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package sderr + +import ( + "bufio" + "fmt" + "io" + "path/filepath" + "strings" + + "github.com/urso/diag" +) + +type errWithStack struct { + err error +} + +type errValue struct { + at loc + stack StackTrace + msg string + ctx *diag.Context +} + +type wrappedErrValue struct { + errValue + cause error +} + +type multiErrValue struct { + errValue + causes []error +} + +type ctxValBuf strings.Builder + +func (e *errValue) At() (string, int) { + return e.at.file, e.at.line +} + +func (e *errValue) StackTrace() StackTrace { + return e.stack +} + +func (e *errValue) Context() *diag.Context { + if e.ctx.Len() == 0 { + return nil + } + return diag.NewContext(e.ctx, nil) +} + +func (e *errValue) Error() string { + return e.report(false) +} + +func (e *errValue) Format(st fmt.State, c rune) { + switch c { + case 'v': + if st.Flag('+') { + io.WriteString(st, e.report(true)) + return + } + fallthrough + case 's': + io.WriteString(st, e.report(false)) + case 'q': + io.WriteString(st, fmt.Sprintf("%q", e.report(false))) + default: + panic("unsupported format directive") + } +} + +func (e *errValue) report(verbose bool) string { + buf := &strings.Builder{} + + if !verbose && e.msg != "" { + return e.msg + } + + if verbose && e.msg != "" { + fmt.Fprintf(buf, "%v:%v", filepath.Base(e.at.file), e.at.line) + } + + putStr(buf, e.msg) + + if verbose && e.ctx.Len() > 0 { + pad(buf, " ") + buf.WriteRune('(') + e.ctx.VisitKeyValues((*ctxValBuf)(buf)) + buf.WriteRune(')') + } + + return buf.String() +} + +func (e *wrappedErrValue) Error() string { + return e.report(false) +} + +func (e *wrappedErrValue) Unwrap() error { + return e.cause +} + +func (e *wrappedErrValue) Format(st fmt.State, c rune) { + switch c { + case 'v': + if st.Flag('+') { + io.WriteString(st, e.report(true)) + return + } + fallthrough + case 's': + io.WriteString(st, e.report(false)) + case 'q': + io.WriteString(st, fmt.Sprintf("%q", e.report(false))) + default: + panic("unsupported format directive") + } +} + +func (e *wrappedErrValue) report(verbose bool) string { + buf := &strings.Builder{} + buf.WriteString(e.errValue.report(verbose)) + sep := ": " + if verbose && e.cause != nil { + sep = "\n\t" + putSubErr(buf, sep, e.cause, verbose) + } + return buf.String() +} + +func (e *multiErrValue) Unwrap() error { + if len(e.causes) == 0 { + return nil + } + return e.Cause(0) +} + +func (e *multiErrValue) NumCauses() int { + return len(e.causes) +} + +func (e *multiErrValue) Cause(i int) error { + if i < len(e.causes) { + return e.causes[i] + } + return nil +} + +func (e *multiErrValue) Format(s fmt.State, c rune) { + switch c { + case 'v': + if s.Flag('+') { + io.WriteString(s, e.report(true)) + return + } + fallthrough + case 's': + io.WriteString(s, e.report(false)) 
+	case 'q':
+		io.WriteString(s, fmt.Sprintf("%q", e.report(false)))
+	default:
+		panic("unsupported format directive")
+	}
+}
+
+func (e *multiErrValue) report(verbose bool) string {
+	buf := &strings.Builder{}
+	buf.WriteString(e.errValue.report(verbose))
+	sep := ": "
+	if verbose {
+		for _, cause := range e.causes {
+			sep = "\n\t"
+			putSubErr(buf, sep, cause, verbose)
+		}
+	}
+	return buf.String()
+}
+
+func (b *ctxValBuf) OnObjStart(key string) error {
+	_, err := fmt.Fprintf((*strings.Builder)(b), "%v={", key)
+	return err
+}
+
+func (b *ctxValBuf) OnObjEnd() error {
+	_, err := fmt.Fprint((*strings.Builder)(b), "}")
+	return err
+}
+
+func (b *ctxValBuf) OnValue(key string, v diag.Value) (err error) {
+	v.Reporter.Ifc(&v, func(val interface{}) {
+		_, err = fmt.Fprintf((*strings.Builder)(b), "%v=%v", key, val)
+	})
+	return err
+}
+
+func pad(buf *strings.Builder, pattern string) bool {
+	if buf.Len() == 0 {
+		return false
+	}
+
+	buf.WriteString(pattern)
+	return true
+}
+
+func putStr(buf *strings.Builder, s string) bool {
+	if s == "" {
+		return false
+	}
+	pad(buf, ": ")
+	buf.WriteString(s)
+	return true
+}
+
+func putSubErr(b *strings.Builder, sep string, err error, verbose bool) bool {
+	if err == nil {
+		return false
+	}
+
+	var s string
+	if verbose {
+		s = fmt.Sprintf("%+v", err)
+	} else {
+		s = fmt.Sprintf("%v", err)
+	}
+
+	if s == "" {
+		return false
+	}
+
+	pad(b, sep)
+
+	// iterate lines
+	r := strings.NewReader(s)
+	scanner := bufio.NewScanner(r)
+	first := true
+	for scanner.Scan() {
+		if !first {
+			pad(b, sep)
+		} else {
+			first = false
+		}
+
+		b.WriteString(scanner.Text())
+	}
+	return true
+}
diff --git a/vendor/github.com/urso/sderr/go.mod b/vendor/github.com/urso/sderr/go.mod
new file mode 100644
index 00000000000..6fd02b46e36
--- /dev/null
+++ b/vendor/github.com/urso/sderr/go.mod
@@ -0,0 +1,5 @@
+module github.com/urso/sderr
+
+go 1.13
+
+require github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797
diff --git a/vendor/github.com/urso/sderr/go.sum b/vendor/github.com/urso/sderr/go.sum
new file mode 100644
index 00000000000..5108f1f1317
--- /dev/null
+++ b/vendor/github.com/urso/sderr/go.sum
@@ -0,0 +1,6 @@
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs=
+github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/urso/sderr/query.go b/vendor/github.com/urso/sderr/query.go
new file mode 100644
index 00000000000..49fb1b48167
--- /dev/null
+++ b/vendor/github.com/urso/sderr/query.go
@@ -0,0 +1,365 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+
+package sderr
+
+import (
+	"reflect"
+
+	"github.com/urso/diag"
+)
+
+// errCheckIs is an optional interface that allows users and errors to
+// customize the behavior of `Is`, such that custom error types can be matched,
+// or private fields can be checked.
+type errCheckIs interface {
+	Is(error) bool
+}
+
+// errConv is used by `As` to allow for some customized conversion from err
+// to target in As.
+type errConv interface {
+	As(interface{}) bool
+}
+
+// causer interface allows us to iterate errors with unique causes.
+// The interface is compatible with github.com/pkg/errors.
+type causer interface {
+	Cause() error
+}
+
+type unwrapper interface {
+	Unwrap() error
+}
+
+// multiCauser interface allows us to efficiently iterate a set of causes
+// leading to the current error.
+// The interface is compatible with github.com/urso/ecslog/errx
+type multiCauser interface {
+	NumCauses() int
+	Cause(i int) error
+}
+
+// wrappedError is compatible with github.com/hashicorp/go-multierror
+type wrappedError interface {
+	WrappedErrors() []error
+}
+
+var tError = reflect.TypeOf((*error)(nil)).Elem()
+
+// At returns the file name and line the error originated from (if present)
+func At(err error) (string, int) {
+	if pe, ok := err.(interface{ At() (string, int) }); ok {
+		return pe.At()
+	}
+	return "", 0
+}
+
+// Trace returns the stack trace, if the error value contains one.
+func Trace(err error) StackTrace {
+	if se, ok := err.(interface{ StackTrace() StackTrace }); ok {
+		return se.StackTrace()
+	}
+	return nil
+}
+
+// Context returns the error's diagnostic context, if the direct error value has a context.
+func Context(err error) *diag.Context {
+	if ce, ok := err.(interface{ Context() *diag.Context }); ok {
+		return ce.Context()
+	}
+	return nil
+}
+
+// Is walks the complete error tree, trying to find an error that matches
+// target.
+//
+// An error is matched with target, if the error is equal to the target,
+// if error implements Is(error) bool that returns true for target, or
+// target implements Is(error) bool such that Is(error) returns true.
+//
+// Use `Find` to return the actual error value that would match.
+func Is(err, target error) bool {
+	if err == nil {
+		return err == target
+	}
+
+	return Find(err, target) != nil
+}
+
+// IsIf walks the complete error tree, trying to match an error
+// with the given predicate.
+//
+// Use `FindIf` to return the actual error value that would match.
+func IsIf(err error, pred func(error) bool) bool {
+	if err == nil {
+		return pred(err)
+	}
+
+	return FindIf(err, pred) != nil
+}
+
+// As finds the first error in the error tree that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool {
+	if target == nil {
+		panic("errors: target cannot be nil")
+	}
+
+	val := reflect.ValueOf(target)
+	typ := val.Type()
+	if typ.Kind() != reflect.Ptr || val.IsNil() {
+		panic("errors: target must be a non-nil pointer")
+	}
+
+	if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(tError) {
+		panic("errors: *target must be interface or implement error")
+	}
+
+	targetType := typ.Elem()
+	assigned := false
+	Iter(err, func(cur error) bool {
+		if reflect.TypeOf(cur).AssignableTo(targetType) {
+			val.Elem().Set(reflect.ValueOf(cur))
+			assigned = true
+		} else if x, ok := cur.(errConv); ok && x.As(target) {
+			// check the node being visited (cur), not the root error
+			assigned = true
+		}
+
+		// continue searching the error tree until we find a matching error
+		return !assigned
+	})
+
+	return assigned
+}
+
+// Find walks the complete error tree, trying to find an error that matches
+// target. The first error matching target is returned.
+//
+// An error is matched with target, if the error is equal to the target,
+// if error implements Is(error) bool that returns true for target, or
+// target implements Is(error) bool such that Is(error) returns true.
+func Find(err, target error) error {
+	isComparable := reflect.TypeOf(target).Comparable()
+	var targetCheck errCheckIs
+	if tmp, ok := target.(errCheckIs); ok {
+		targetCheck = tmp
+	}
+
+	return FindIf(err, func(cur error) bool {
+		if isComparable && cur == target {
+			return true
+		}
+
+		if errCheck, ok := cur.(errCheckIs); ok && errCheck.Is(target) {
+			return true
+		}
+		if targetCheck != nil && targetCheck.Is(cur) {
+			return true
+		}
+
+		return false
+	})
+}
+
+// FindIf returns the first error in the error tree, that matches the
+// given predicate.
+func FindIf(in error, pred func(err error) bool) error {
+	var found error
+	Iter(in, func(err error) bool {
+		matches := pred(err)
+		if matches {
+			found = err
+			return false
+		}
+		return true
+	})
+
+	return found
+}
+
+// NumCauses returns the number of direct errors the error value wraps.
+func NumCauses(in error) int {
+	switch err := in.(type) {
+	case wrappedError:
+		return len(err.WrappedErrors())
+	case multiCauser:
+		return err.NumCauses()
+	case causer:
+		if err.Cause() == nil {
+			return 0
+		}
+		return 1
+	case unwrapper:
+		if err.Unwrap() == nil {
+			return 0
+		}
+		return 1
+	default:
+		return 0
+	}
+}
+
+// Cause returns the i-th cause from the error value.
+func Cause(in error, i int) error {
+	switch err := in.(type) {
+	case multiCauser:
+		return err.Cause(i)
+
+	case wrappedError:
+		return err.WrappedErrors()[i]
+
+	case causer:
+		if i > 0 {
+			panic("index out of bounds")
+		}
+		return err.Cause()
+
+	case unwrapper:
+		if i > 0 {
+			panic("index out of bounds")
+		}
+		return err.Unwrap()
+
+	default:
+		return nil
+	}
+}
+
+// Unwrap returns the first wrapped cause from the error value.
+func Unwrap(in error) error {
+	switch err := in.(type) {
+	case unwrapper:
+		return err.Unwrap()
+	case causer:
+		return err.Cause()
+	case wrappedError:
+		errs := err.WrappedErrors()
+		if len(errs) == 0 {
+			return nil
+		}
+		return errs[0]
+	case multiCauser:
+		if err.NumCauses() == 0 {
+			return nil
+		}
+		return err.Cause(0)
+	default:
+		return nil
+	}
+}
+
+// Walk walks the complete error tree.
+func Walk(in error, fn func(error)) {
+	Iter(in, func(err error) bool {
+		fn(err)
+		return true
+	})
+}
+
+// Collect returns all errors in the error tree that match the predicate.
+func Collect(in error, pred func(error) bool) []error {
+	var errs []error
+	Walk(in, func(err error) {
+		if pred(err) {
+			errs = append(errs, err)
+		}
+	})
+	return errs
+}
+
+// CollectType returns all errors that are convertible to the error type of sample.
+func CollectType(in, sample error) []error { return Collect(in, PredType(sample)) }
+
+// IsType checks if any error in the error tree is convertible to the type of sample.
+func IsType(in, sample error) bool { return IsIf(in, PredType(sample)) }
+
+// FindType finds the first error that is convertible to the error type of sample.
+func FindType(in, sample error) error { return FindIf(in, PredType(sample)) }
+
+// PredType creates a predicate checking if the type of an error value matches
+// the type of sample.
+//
+// The predicate checks if the error type is equal or convertible to the sample
+// type.
+func PredType(sample error) func(error) bool {
+	sampleType := reflect.TypeOf(sample).Elem()
+	return func(current error) bool {
+		t := reflect.TypeOf(current).Elem()
+		return t == sampleType || t.ConvertibleTo(sampleType)
+	}
+}
+
+// WalkEach walks every single error value in the given array of errors.
+func WalkEach(errs []error, fn func(error)) {
+	for _, err := range errs {
+		Walk(err, fn)
+	}
+}
+
+// Iter iterates the complete error tree, calling fn on each error value found.
+// The user function fn can stop the iteration by returning false.
+func Iter(in error, fn func(err error) bool) {
+	doIter(in, fn)
+}
+
+func doIter(in error, fn func(err error) bool) bool {
+	for {
+		if in == nil {
+			return true // continue searching
+		}
+
+		// call fn and back-propagate search decision
+		if cont := fn(in); !cont {
+			return cont
+		}
+
+		switch err := in.(type) {
+		case causer:
+			in = err.Cause()
+
+		case unwrapper:
+			in = err.Unwrap()
+
+		case multiCauser:
+			num := err.NumCauses()
+			switch num {
+			case 0:
+				return true
+
+			case 1:
+				in = err.Cause(0)
+
+			default:
+				for i := 0; i < num; i++ {
+					if cont := doIter(err.Cause(i), fn); !cont {
+						return false
+					}
+				}
+				return true
+			}
+
+		case wrappedError:
+			for _, cause := range err.WrappedErrors() {
+				if cont := doIter(cause, fn); !cont {
+					return false
+				}
+			}
+			return true
+
		default:
+			return true
+		}
+	}
+}
diff --git a/vendor/github.com/urso/sderr/sderr.go b/vendor/github.com/urso/sderr/sderr.go
new file mode 100644
index 00000000000..bff0faf89c2
--- /dev/null
+++ b/vendor/github.com/urso/sderr/sderr.go
@@ -0,0 +1,50 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+
+package sderr
+
+import (
+	"context"
+	"errors"
+
+	"github.com/urso/diag"
+)
+
+var emptyBuilder = &Builder{}
+
+// New returns an error that formats as the given text. Each call to New
+// returns a distinct error value even if the text is identical.
+func New(msg string) error {
+	return errors.New(msg)
+}
+
+func With(fields ...interface{}) *Builder {
+	return emptyBuilder.With(fields...)
+} + +func WithStack() *Builder { + return emptyBuilder.WithStack() +} + +func WithDiagnosticContext(ctx *diag.Context) *Builder { + return emptyBuilder.WithDiagnosticContext(ctx) +} + +func WithDiagnotics(ctx context.Context) *Builder { + return emptyBuilder.WithDiagnotics(ctx) +} + +func Errf(msg string, vs ...interface{}) error { + return emptyBuilder.doErrf(1, msg, vs) +} + +func Wrap(cause error, msg string, vs ...interface{}) error { + return emptyBuilder.doWrap(1, cause, msg, vs) +} + +func WrapAll(causes []error, msg string, vs ...interface{}) error { + return emptyBuilder.doWrapAll(1, causes, msg, vs) +} diff --git a/vendor/github.com/urso/sderr/stacktrace.go b/vendor/github.com/urso/sderr/stacktrace.go new file mode 100644 index 00000000000..9f1dc59a783 --- /dev/null +++ b/vendor/github.com/urso/sderr/stacktrace.go @@ -0,0 +1,110 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package sderr + +import ( + "fmt" + "io" + "runtime" + "unsafe" +) + +type StackTrace []Frame + +type Frame uintptr + +type loc struct { + file string + line int +} + +func makeStackTrace(skip int) StackTrace { + var pcs [20]uintptr + n := runtime.Callers(skip+2, pcs[:]) + stack := pcs[:n] + return *(*StackTrace)(unsafe.Pointer(&stack)) +} + +func getCaller(skip int) loc { + var pcs [1]uintptr + n := runtime.Callers(skip+2, pcs[:]) + if n == 0 { + return loc{} + } + + pc := pcs[0] - 1 + fn := runtime.FuncForPC(pc) + if fn == nil { + return loc{} + } + + file, line := fn.FileLine(pc) + return loc{ + file: file, + line: line, + } +} + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for i, frame := range st { + if i > 0 { + io.WriteString(s, "\n") + } + frame.Format(s, verb) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%s", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +func (f Frame) Function() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "" + } + return fn.Name() +} + +func (f Frame) File() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +func (f Frame) Line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 'v', 's': + if s.Flag('+') || s.Flag('#') { + fmt.Fprintf(s, "%s\n\t%s:%d", f.Function(), f.File(), f.Line()) + } else { + fmt.Fprintf(s, "%s:%d", f.File(), f.Line()) + } + } +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index b33bdbaece8..0b9bb6030a0 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -626,32 +626,18 @@ func trace(args ...interface{}) func() { func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. 
- for i := len(parser.simple_keys) - 1; i >= 0; i-- { - simple_key := &parser.simple_keys[i] - if simple_key.token_number < parser.tokens_parsed { - break - } - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - } else if valid && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break } } - - // We are finished. - if !need_more_tokens { - break - } // Fetch the next token. if !yaml_parser_fetch_next_token(parser) { return false @@ -883,6 +869,7 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { return false } parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 } return true } @@ -897,9 +884,10 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { "while scanning a simple key", parser.simple_keys[i].mark, "could not find expected ':'") } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) } - // Remove the key from the stack. - parser.simple_keys[i].possible = false return true } @@ -930,7 +918,9 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { if parser.flow_level > 0 { parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] } return true } @@ -1007,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { // Initialize the simple key stack. parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + parser.simple_keys_by_tok = make(map[int]int) + // A simple key is allowed at the beginning of the stream. parser.simple_key_allowed = true @@ -1310,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool { // Remove the simple key. simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) // A simple key cannot follow another simple key. parser.simple_key_allowed = false diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go index e25cee563be..f6a9c8e34b1 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -579,6 +579,7 @@ type yaml_parser_t struct { simple_key_allowed bool // May a simple key occur at the current position? simple_keys []yaml_simple_key_t // The stack of simple keys. 
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number // Parser stuff diff --git a/vendor/modules.txt b/vendor/modules.txt index 30328b34b00..a3e2a735b5b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -116,6 +116,8 @@ github.com/akavel/rsrc/coff github.com/akavel/rsrc/ico # github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 github.com/andrewkroh/sys/windows/svc/eventlog +# github.com/antlr/antlr4 v0.0.0-20200225173536-225249fdaef5 +github.com/antlr/antlr4/runtime/Go/antlr # github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/armon/go-socks5 # github.com/aws/aws-lambda-go v1.6.0 @@ -565,6 +567,10 @@ github.com/googleapis/gnostic/extensions github.com/gorhill/cronexpr # github.com/gorilla/websocket v1.4.1 github.com/gorilla/websocket +# github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce +github.com/hashicorp/errwrap +# github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 +github.com/hashicorp/go-multierror # github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.0.0 @@ -656,6 +662,8 @@ github.com/modern-go/concurrent github.com/modern-go/reflect2 # github.com/morikuni/aec v1.0.0 github.com/morikuni/aec +# github.com/oklog/ulid v1.3.1 +github.com/oklog/ulid # github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483 github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 @@ -725,7 +733,7 @@ github.com/sirupsen/logrus github.com/spf13/cobra # github.com/spf13/pflag v1.0.3 github.com/spf13/pflag -# github.com/stretchr/objx v0.1.2-0.20180702103455-b8b73a35e983 +# github.com/stretchr/objx v0.2.0 github.com/stretchr/objx # github.com/stretchr/testify v1.4.0 github.com/stretchr/testify/assert @@ -738,14 +746,26 @@ github.com/tsg/gopacket github.com/tsg/gopacket/afpacket github.com/tsg/gopacket/layers github.com/tsg/gopacket/pcap +# github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 +github.com/urso/diag +github.com/urso/diag/ctxfmt +# github.com/urso/diag-ecs v0.0.0-20200210114345-ab085841dcb9 +github.com/urso/diag-ecs/ecs +# github.com/urso/ecslog v0.0.1 +github.com/urso/ecslog +github.com/urso/ecslog/backend +github.com/urso/ecslog/backend/appender +github.com/urso/ecslog/backend/layout # github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e github.com/urso/go-bin -# github.com/urso/magetools v0.0.0-20200106130147-61080ed7b22b +# github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a github.com/urso/magetools/clitool github.com/urso/magetools/ctrl github.com/urso/magetools/fs github.com/urso/magetools/gotool github.com/urso/magetools/mgenv +# github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec +github.com/urso/sderr # github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41 github.com/vmware/govmomi github.com/vmware/govmomi/find @@ -1061,7 +1081,7 @@ gopkg.in/mgo.v2/bson gopkg.in/mgo.v2/internal/json gopkg.in/mgo.v2/internal/sasl gopkg.in/mgo.v2/internal/scram -# gopkg.in/yaml.v2 v2.2.7 +# gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 # honnef.co/go/tools v0.0.1-2019.2.3 honnef.co/go/tools/arg diff --git a/x-pack/agent/.gitignore b/x-pack/agent/.gitignore new file mode 100644 index 00000000000..8ec1b138bbd --- /dev/null +++ b/x-pack/agent/.gitignore @@ -0,0 +1,8 @@ +# agent +build/ +agent.dev.yml +pkg/agent/operation/tests/scripts/short--1.0.yml +pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/configurable 
+pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/configurablebyfile
+pkg/agent/application/fleet.yml
+pkg/core/plugin/operation/tests/scripts/configurable/1.0/configurable
diff --git a/x-pack/agent/Dockerfile b/x-pack/agent/Dockerfile
new file mode 100644
index 00000000000..8636ff37944
--- /dev/null
+++ b/x-pack/agent/Dockerfile
@@ -0,0 +1,16 @@
+ARG GO_VERSION=1.12.7
+FROM circleci/golang:${GO_VERSION}
+
+
+ARG TEST_RESULTS=/tmp/test-results
+
+RUN mkdir -p ${TEST_RESULTS} && mkdir -p ./code
+RUN go get github.com/magefile/mage
+
+ENV GO111MODULE=on
+WORKDIR ./code
+#COPY --chown=circleci:circleci . .
+COPY . .
+VOLUME "/tmp" "dev-tools/mage/build/distributions"
+USER root
+
diff --git a/x-pack/agent/Makefile b/x-pack/agent/Makefile
new file mode 100644
index 00000000000..71dd1dea852
--- /dev/null
+++ b/x-pack/agent/Makefile
@@ -0,0 +1,13 @@
+
+#
+# Variables
+#
+ES_BEATS ?= ../..
+
+#
+# Includes
+#
+include $(ES_BEATS)/dev-tools/make/xpack.mk
+
+.PHONY: docs
+docs: ## @build Builds the documentation for the beat
diff --git a/x-pack/agent/_meta/agent.docker.yml b/x-pack/agent/_meta/agent.docker.yml
new file mode 100644
index 00000000000..3a900a1927f
--- /dev/null
+++ b/x-pack/agent/_meta/agent.docker.yml
@@ -0,0 +1,87 @@
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent supports two modes of operation:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+  mode: "local"
+
+  reporting:
+    log:
+      # format in which logs will be written, options are json or default.
+      format: "default"
+
+  # Allow the agent to reload its configuration locally on disk.
+  # Notes: Only specific process configuration will be reloaded.
+  reload:
+    # enabled configures whether the agent reloads the local configuration.
+    #
+    # Default is true
+    enabled: true
+
+    # period defines how frequently we should look for changes in the configuration.
+    period: 10s
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimum port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within this timeout,
+  # the start operation is considered a failure
+  spawn_timeout: 30s
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retries_count specifies number of retries. Default is 3.
+  # Retry count of 1 means it will be retried one time after one failure.
+  retries_count: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # max_delay specifies the maximum delay between retries. Default is 300s
+  max_delay: 5m
+  # Exponential determines whether delay is treated as exponential.
+  # With 30s delay and 3 retries: 30, 60, 120s
+  # Default is false
+  exponential: false
+
+monitoring:
+  # enabled turns on monitoring of running processes
+  enabled: false
+  # enables log monitoring
+  logs: false
+  # enables metrics monitoring
+  metrics: false
diff --git a/x-pack/agent/_meta/agent.fleet.yml b/x-pack/agent/_meta/agent.fleet.yml
new file mode 100644
index 00000000000..3157dc31c16
--- /dev/null
+++ b/x-pack/agent/_meta/agent.fleet.yml
@@ -0,0 +1,45 @@
+#================================ General =====================================
+# Beats is configured under Fleet; you can define most settings
+# from the Kibana UI. You can update this file to configure the settings that
+# are not supported by Fleet.
+management:
+  mode: "fleet"
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimum port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within this timeout,
+  # the start operation is considered a failure
+  spawn_timeout: 30s
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retries_count specifies number of retries. Default is 3.
+  # Retry count of 1 means it will be retried one time after one failure.
+  retries_count: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # max_delay specifies the maximum delay between retries. Default is 300s
+  max_delay: 5m
+  # Exponential determines whether delay is treated as exponential.
+  # With 30s delay and 3 retries: 30, 60, 120s
+  # Default is false
+  exponential: false
diff --git a/x-pack/agent/_meta/agent.yml b/x-pack/agent/_meta/agent.yml
new file mode 100644
index 00000000000..00b16718a3c
--- /dev/null
+++ b/x-pack/agent/_meta/agent.yml
@@ -0,0 +1,88 @@
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent supports two modes of operation:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+  mode: "local"
+
+
+  reporting:
+    log:
+      # format in which logs will be written, options are json or default.
+      format: "default"
+
+  # Allow the agent to reload its configuration locally on disk.
+  # Notes: Only specific process configuration will be reloaded.
+  reload:
+    # enabled configures whether the agent reloads the local configuration.
+    #
+    # Default is true
+    enabled: true
+
+    # period defines how frequently we should look for changes in the configuration.
+    period: 10s
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimum port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within this timeout,
+  # the start operation is considered a failure
+  spawn_timeout: 30s
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retries_count specifies number of retries. Default is 3.
+  # Retry count of 1 means it will be retried one time after one failure.
+  retries_count: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # max_delay specifies the maximum delay between retries. Default is 300s
+  max_delay: 5m
+  # Exponential determines whether delay is treated as exponential.
+  # With 30s delay and 3 retries: 30, 60, 120s
+  # Default is false
+  exponential: false
+
+monitoring:
+  # enabled turns on monitoring of running processes
+  enabled: false
+  # enables log monitoring
+  logs: false
+  # enables metrics monitoring
+  metrics: false
diff --git a/x-pack/agent/_meta/common.p1.yml b/x-pack/agent/_meta/common.p1.yml
new file mode 100644
index 00000000000..6c9a9f190e7
--- /dev/null
+++ b/x-pack/agent/_meta/common.p1.yml
@@ -0,0 +1,5 @@
+###################### Agent Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The agent.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
diff --git a/x-pack/agent/_meta/common.p2.yml b/x-pack/agent/_meta/common.p2.yml
new file mode 100644
index 00000000000..5a75ec2fc60
--- /dev/null
+++ b/x-pack/agent/_meta/common.p2.yml
@@ -0,0 +1,113 @@
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent supports two modes of operation:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+ mode: "local" + + fleet: + access_token: "" + kibana: + # kibana minimal configuration + host: "localhost:5601" + ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # optional values + #protocol: "https" + #username: "elastic" + #password: "changeme" + #path: "" + #ssl.verification_mode: full + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + #ssl.cipher_suites: [] + #ssl.curve_types: [] + + reporting: + log: + # format in which logs will be written, options are json or default. + format: "default" + fleet: + # enables fleet reporter. fleet reporting can be enabled only in fleet management.mode. + enabled: false + + # Reporting threshold indicates how many events should be kept in-memory before reporting them to fleet. + reporting_threshold: 10000 + + # Frequency used to check the queue of events to be sent out to fleet. + reporting_check_frequency_sec: 30 + + # Allow fleet to reload his configuration locally on disk. + # Notes: Only specific process configuration will be reloaded. + reload: + # enabled configure the agent to reload or not the local configuration. + # + # Default is true + enabled: true + + # period define how frequent we should look for changes in the configuration. + period: 10s + +download: + # source of the artifacts, requires elastic like structure and naming of the binaries + # e.g /windows-x86.zip + sourceURI: "https://artifacts.elastic.co/downloads/beats/" + # path to the directory containing downloaded packages + target_directory: "/home/elastic/downloads" + # timeout for downloading package + timeout: 30s + # file path to a public key used for verifying downloaded artifacts + # if not file is present agent will try to load public key from elastic.co website. + pgpfile: "/home/elastic/elastic.pgp" + # install_path describes the location of installed packages/programs. It is also used + # for reading program specifications. + install_path: "/home/elastic/install" + +process: + # minimal port number for spawned processes + min_port: 10000 + # maximum port number for spawned processes + max_port: 30000 + # timeout for creating new processes. when process is not successfully created by this timeout + # start operation is considered a failure + spawn_timeout: 30s + +retry: + # Enabled determines whether retry is possible. Default is false. + enabled: true + # RetriesCount specifies number of retries. Default is 3. + # Retry count of 1 means it will be retried one time after one failure. + retriesCount: 3 + # Delay specifies delay in ms between retries. Default is 30s + delay: 30s + # MaxDelay specifies maximum delay in ms between retries. Default is 300s + maxDelay: 5m + # Exponential determines whether delay is treated as exponential. + # With 30s delay and 3 retries: 30, 60, 120s + # Default is false + exponential: false + +monitoring: + # enabled turns on monitoring of running processes + enabled: false + # enables log monitoring + logs: false + # enables metrics monitoring + metrics: false diff --git a/x-pack/agent/_meta/common.reference.p1.yml b/x-pack/agent/_meta/common.reference.p1.yml new file mode 100644 index 00000000000..6c9a9f190e7 --- /dev/null +++ b/x-pack/agent/_meta/common.reference.p1.yml @@ -0,0 +1,5 @@ +###################### Agent Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. The agent.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. 
diff --git a/x-pack/agent/_meta/common.reference.p2.yml b/x-pack/agent/_meta/common.reference.p2.yml
new file mode 100644
index 00000000000..48b73572621
--- /dev/null
+++ b/x-pack/agent/_meta/common.reference.p2.yml
@@ -0,0 +1,113 @@
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent currently only supports the following mode:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+  mode: local
+
+  fleet:
+    access_token: ""
+    kibana:
+      # kibana minimal configuration
+      host: "localhost:5601"
+      ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+      # optional values
+      #protocol: "https"
+      #username: "elastic"
+      #password: "changeme"
+      #path: ""
+      #ssl.verification_mode: full
+      #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+      #ssl.cipher_suites: []
+      #ssl.curve_types: []
+
+  reporting:
+    log:
+      # format in which logs will be written, options are json or default.
+      format: "default"
+    fleet:
+      # enables the fleet reporter. Fleet reporting can be enabled only in fleet management.mode.
+      enabled: false
+
+      # Reporting threshold indicates how many events should be kept in-memory before reporting them to fleet.
+      reporting_threshold: 10000
+
+      # Frequency used to check the queue of events to be sent out to fleet.
+      reporting_check_frequency_sec: 30
+
+  # Allow fleet to reload its configuration locally on disk.
+  # Notes: Only specific process configuration will be reloaded.
+  reload:
+    # enabled configures whether the agent reloads the local configuration.
+    #
+    # Default is true
+    enabled: true
+
+    # period defines how frequently we should look for changes in the configuration.
+    period: 10s
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimum port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within this timeout,
+  # the start operation is considered a failure
+  spawn_timeout: 30s
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retries_count specifies number of retries. Default is 3.
+  # Retry count of 1 means it will be retried one time after one failure.
+  retries_count: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # max_delay specifies the maximum delay between retries. Default is 300s
+  max_delay: 5m
+  # Exponential determines whether delay is treated as exponential.
+  # With 30s delay and 3 retries: 30, 60, 120s
+  # Default is false
+  exponential: false
+
+monitoring:
+  # enabled turns on monitoring of running processes
+  enabled: false
+  # enables log monitoring
+  logs: false
+  # enables metrics monitoring
+  metrics: false
diff --git a/x-pack/agent/agent.docker.yml b/x-pack/agent/agent.docker.yml
new file mode 100644
index 00000000000..3a900a1927f
--- /dev/null
+++ b/x-pack/agent/agent.docker.yml
@@ -0,0 +1,87 @@
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent supports two modes of operation:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+  mode: "local"
+
+  reporting:
+    log:
+      # format in which logs will be written, options are json or default.
+      format: "default"
+
+  # Allow the agent to reload its configuration locally on disk.
+  # Notes: Only specific process configuration will be reloaded.
+  reload:
+    # enabled configures whether the agent reloads the local configuration.
+    #
+    # Default is true
+    enabled: true
+
+    # period defines how frequently we should look for changes in the configuration.
+    period: 10s
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimum port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within this timeout,
+  # the start operation is considered a failure
+  spawn_timeout: 30s
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retries_count specifies number of retries. Default is 3.
+  # Retry count of 1 means it will be retried one time after one failure.
+  retries_count: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # max_delay specifies the maximum delay between retries. Default is 300s
+  max_delay: 5m
+  # Exponential determines whether delay is treated as exponential.
+  # With 30s delay and 3 retries: 30, 60, 120s
+  # Default is false
+  exponential: false
+
+monitoring:
+  # enabled turns on monitoring of running processes
+  enabled: false
+  # enables log monitoring
+  logs: false
+  # enables metrics monitoring
+  metrics: false
diff --git a/x-pack/agent/agent.reference.yml b/x-pack/agent/agent.reference.yml
new file mode 100644
index 00000000000..dc425dc00b5
--- /dev/null
+++ b/x-pack/agent/agent.reference.yml
@@ -0,0 +1,118 @@
+###################### Agent Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The agent.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent currently only supports the following mode:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+  mode: local
+
+  fleet:
+    access_token: ""
+    kibana:
+      # kibana minimal configuration
+      host: "localhost:5601"
+      ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+      # optional values
+      #protocol: "https"
+      #username: "elastic"
+      #password: "changeme"
+      #path: ""
+      #ssl.verification_mode: full
+      #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+      #ssl.cipher_suites: []
+      #ssl.curve_types: []
+
+  reporting:
+    log:
+      # format in which logs will be written, options are json or default.
+      format: "default"
+    fleet:
+      # enables the fleet reporter. Fleet reporting can be enabled only in fleet management.mode.
+      enabled: false
+
+      # Reporting threshold indicates how many events should be kept in-memory before reporting them to fleet.
+      reporting_threshold: 10000
+
+      # Frequency used to check the queue of events to be sent out to fleet.
+      reporting_check_frequency_sec: 30
+
+  # Allow fleet to reload its configuration locally on disk.
+  # Notes: Only specific process configuration will be reloaded.
+  reload:
+    # enabled configures whether the agent reloads the local configuration.
+    #
+    # Default is true
+    enabled: true
+
+    # period defines how frequently we should look for changes in the configuration.
+    period: 10s
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimum port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within this timeout,
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retries_count specifies the number of retries. Default is 3.
+  # A retry count of 1 means the operation is retried once after the first failure.
+  retries_count: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # max_delay specifies the maximum delay between retries. Default is 300s
+  max_delay: 5m
+  # exponential determines whether the delay between retries grows exponentially.
+  # With a 30s delay and 3 retries the delays are: 30s, 60s, 120s.
+  # Default is false
+  exponential: false
+
+monitoring:
+  # enabled turns on monitoring of running processes
+  enabled: false
+  # enables log monitoring
+  logs: false
+  # enables metrics monitoring
+  metrics: false
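The `reload` block describes a simple poll-based watcher: every `period` the agent checks the local configuration for changes. Below is a minimal Go sketch of that pattern; the function name, hashing-based change detection, and file name are assumptions for illustration, not the agent's implementation.

package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"
	"time"
)

// watchConfig polls path every period and invokes onChange when the file
// content hash changes. Illustrative sketch only.
func watchConfig(path string, period time.Duration, onChange func([]byte)) {
	var last [sha256.Size]byte
	for range time.Tick(period) {
		data, err := ioutil.ReadFile(path)
		if err != nil {
			continue // keep the last known-good configuration
		}
		if h := sha256.Sum256(data); h != last {
			last = h
			onChange(data)
		}
	}
}

func main() {
	go watchConfig("agent.yml", 10*time.Second, func(b []byte) {
		fmt.Printf("configuration changed (%d bytes), reloading\n", len(b))
	})
	select {} // block forever
}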
diff --git a/x-pack/agent/agent.yml b/x-pack/agent/agent.yml
new file mode 100644
index 00000000000..07a58aed369
--- /dev/null
+++ b/x-pack/agent/agent.yml
@@ -0,0 +1,118 @@
+###################### Agent Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The agent.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+######################################
+# Fleet configuration
+######################################
+outputs:
+  default:
+    type: elasticsearch
+    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
+    username: elastic
+    password: changeme
+
+streams:
+  - type: event/file
+    enabled: true
+    paths:
+      - /var/log/hello1.log
+      - /var/log/hello2.log
+
+management:
+  # Mode of management, the agent supports two modes of operation:
+  #
+  # local: The agent will expect to find the inputs configuration in the local file.
+  #
+  # Default is local.
+  mode: "local"
+
+  fleet:
+    access_token: ""
+    kibana:
+      # kibana minimal configuration
+      host: "localhost:5601"
+      ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+      # optional values
+      #protocol: "https"
+      #username: "elastic"
+      #password: "changeme"
+      #path: ""
+      #ssl.verification_mode: full
+      #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+      #ssl.cipher_suites: []
+      #ssl.curve_types: []
+
+  reporting:
+    log:
+      # format in which logs will be written, options are json or default.
+      format: "default"
+    fleet:
+      # enables the fleet reporter. Fleet reporting can be enabled only in the fleet management mode.
+      enabled: false
+
+      # Reporting threshold indicates how many events should be kept in memory before reporting them to fleet.
+      reporting_threshold: 10000
+
+      # Frequency used to check the queue of events to be sent out to fleet.
+      reporting_check_frequency_sec: 30
+
+  # Allow the agent to reload its configuration locally on disk.
+  # Notes: Only specific process configuration will be reloaded.
+  reload:
+    # enabled configures whether the agent reloads the local configuration.
+    #
+    # Default is true
+    enabled: true
+
+    # period defines how frequently we should look for changes in the configuration.
+    period: 10s
+
+download:
+  # source of the artifacts, requires an Elastic-like structure and naming of the binaries
+  # e.g. /windows-x86.zip
+  sourceURI: "https://artifacts.elastic.co/downloads/beats/"
+  # path to the directory containing downloaded packages
+  target_directory: "/home/elastic/downloads"
+  # timeout for downloading a package
+  timeout: 30s
+  # file path to a public key used for verifying downloaded artifacts
+  # if no file is present the agent will try to load the public key from the elastic.co website.
+  pgpfile: "/home/elastic/elastic.pgp"
+  # install_path describes the location of installed packages/programs. It is also used
+  # for reading program specifications.
+  install_path: "/home/elastic/install"
+
+process:
+  # minimal port number for spawned processes
+  min_port: 10000
+  # maximum port number for spawned processes
+  max_port: 30000
+  # timeout for creating new processes. When a process is not successfully created within
+  # this timeout, the start operation is considered a failure.
+  spawn_timeout: 30s
+
+retry:
+  # enabled determines whether retry is possible. Default is false.
+  enabled: true
+  # retriesCount specifies the number of retries. Default is 3.
+  # A retry count of 1 means the operation is retried once after the first failure.
+  retriesCount: 3
+  # delay specifies the delay between retries. Default is 30s
+  delay: 30s
+  # maxDelay specifies the maximum delay between retries. Default is 300s
+  maxDelay: 5m
+  # exponential determines whether the delay between retries grows exponentially.
+  # With a 30s delay and 3 retries the delays are: 30s, 60s, 120s.
+  # Default is false
+  exponential: false
+
+monitoring:
+  # enabled turns on monitoring of running processes
+  enabled: false
+  # enables log monitoring
+  logs: false
+  # enables metrics monitoring
+  metrics: false
diff --git a/x-pack/agent/cmd/agent/agent.go b/x-pack/agent/cmd/agent/agent.go
new file mode 100644
index 00000000000..6b1301ee388
--- /dev/null
+++ b/x-pack/agent/cmd/agent/agent.go
@@ -0,0 +1,25 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package main
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/cmd"
+)
+
+// main is just a wrapper; all commands are defined in 'pkg/agent/cmd'.
+func main() {
+	rand.Seed(time.Now().UnixNano())
+
+	command := cmd.NewCommand()
+	if err := command.Execute(); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
\"-\" means writing to stdout") + flag.StringVar(&license, "license", "Elastic", "License header for generated file.") +} + +var tmpl = template.Must(template.New("cfg").Parse(` +{{ .License }} +// Code generated by dev-tools/cmd/buildfleetcfg/buildfleetcfg.go - DO NOT EDIT. + +package application + +import "github.com/elastic/beats/v7/x-pack/agent/pkg/packer" + +// DefaultAgentFleetConfig is the content of the default configuration when we enroll a beat, the agent.yml +// will be replaced with this variables. +var DefaultAgentFleetConfig []byte + +func init() { + // Packed File + {{ range $i, $f := .Files -}} + // {{ $f }} + {{ end -}} + unpacked := packer.MustUnpack("{{ .Pack }}") + raw, ok := unpacked["_meta/agent.fleet.yml"] + if !ok { + // ensure we have something loaded. + panic("agent.fleet.yml is not included in the binary") + } + DefaultAgentFleetConfig = raw +} +`)) + +func main() { + flag.Parse() + + if len(input) == 0 { + fmt.Fprintln(os.Stderr, "Invalid input source") + os.Exit(1) + } + + l, err := licenses.Find(license) + if err != nil { + fmt.Fprintf(os.Stderr, "problem to retrieve the license, error: %+v", err) + os.Exit(1) + } + + data, err := gen(input, l) + if err != nil { + fmt.Fprintf(os.Stderr, "Error while generating the file, err: %+v\n", err) + os.Exit(1) + } + + if output == "-" { + os.Stdout.Write(data) + return + } + + ioutil.WriteFile(output, data, 0640) + return +} + +func gen(path string, l string) ([]byte, error) { + pack, files, err := packer.Pack(input) + if err != nil { + return nil, err + } + + if len(files) > 1 { + return nil, fmt.Errorf("Can only embed a single configuration file") + } + + var buf bytes.Buffer + tmpl.Execute(&buf, struct { + Pack string + Files []string + License string + }{ + Pack: pack, + Files: files, + License: l, + }) + + return format.Source(buf.Bytes()) +} diff --git a/x-pack/agent/dev-tools/cmd/buildspec/buildspec.go b/x-pack/agent/dev-tools/cmd/buildspec/buildspec.go new file mode 100644 index 00000000000..7cef6ef9a72 --- /dev/null +++ b/x-pack/agent/dev-tools/cmd/buildspec/buildspec.go @@ -0,0 +1,120 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "text/template" + + "github.com/elastic/beats/v7/licenses" + "github.com/elastic/beats/v7/x-pack/agent/pkg/packer" +) + +var ( + input string + output string + license string +) + +func init() { + flag.StringVar(&input, "in", "", "Source of input. \"-\" means reading from stdin") + flag.StringVar(&output, "out", "-", "Output path. \"-\" means writing to stdout") + flag.StringVar(&license, "license", "Elastic", "License header for generated file.") +} + +var tmpl = template.Must(template.New("specs").Parse(` +{{ .License }} +// Code generated by x-pack/dev-tools/cmd/buildspec/buildspec.go - DO NOT EDIT. 
diff --git a/x-pack/agent/dev-tools/cmd/buildspec/buildspec.go b/x-pack/agent/dev-tools/cmd/buildspec/buildspec.go
new file mode 100644
index 00000000000..7cef6ef9a72
--- /dev/null
+++ b/x-pack/agent/dev-tools/cmd/buildspec/buildspec.go
@@ -0,0 +1,120 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"io/ioutil"
+	"os"
+	"text/template"
+
+	"github.com/elastic/beats/v7/licenses"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/packer"
+)
+
+var (
+	input   string
+	output  string
+	license string
+)
+
+func init() {
+	flag.StringVar(&input, "in", "", "Source of input. \"-\" means reading from stdin")
+	flag.StringVar(&output, "out", "-", "Output path. \"-\" means writing to stdout")
+	flag.StringVar(&license, "license", "Elastic", "License header for generated file.")
+}
+
+var tmpl = template.Must(template.New("specs").Parse(`
+{{ .License }}
+// Code generated by x-pack/dev-tools/cmd/buildspec/buildspec.go - DO NOT EDIT.
+
+package program
+
+import (
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/packer"
+)
+
+var Supported []Spec
+var SupportedMap map[string]bool
+
+func init() {
+	// Packed Files
+	{{ range $i, $f := .Files -}}
+	// {{ $f }}
+	{{ end -}}
+	unpacked := packer.MustUnpack("{{ .Pack }}")
+	SupportedMap = make(map[string]bool)
+
+	for f, v := range unpacked {
+		s, err := NewSpecFromBytes(v)
+		if err != nil {
+			panic("Cannot read spec from " + f)
+		}
+		Supported = append(Supported, s)
+		SupportedMap[strings.ToLower(s.Name)] = true
+	}
+}
+`))
+
+func main() {
+	flag.Parse()
+
+	if len(input) == 0 {
+		fmt.Fprintln(os.Stderr, "Invalid input source")
+		os.Exit(1)
+	}
+
+	l, err := licenses.Find(license)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to retrieve the license, error: %+v\n", err)
+		os.Exit(1)
+	}
+
+	data, err := gen(input, l)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error while generating the file, err: %+v\n", err)
+		os.Exit(1)
+	}
+
+	if output == "-" {
+		os.Stdout.Write(data)
+		return
+	}
+
+	if err := ioutil.WriteFile(output, data, 0640); err != nil {
+		fmt.Fprintf(os.Stderr, "Error while writing the file, err: %+v\n", err)
+		os.Exit(1)
+	}
+}
+
+func gen(path string, l string) ([]byte, error) {
+	pack, files, err := packer.Pack(path)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, struct {
+		Pack    string
+		Files   []string
+		License string
+	}{
+		Pack:    pack,
+		Files:   files,
+		License: l,
+	}); err != nil {
+		return nil, err
+	}
+
+	formatted, err := format.Source(buf.Bytes())
+	if err != nil {
+		return nil, err
+	}
+
+	return formatted, nil
+}
diff --git a/x-pack/agent/dev-tools/cmd/fakewebapi/README.md b/x-pack/agent/dev-tools/cmd/fakewebapi/README.md
new file mode 100644
index 00000000000..23853de9a5c
--- /dev/null
+++ b/x-pack/agent/dev-tools/cmd/fakewebapi/README.md
@@ -0,0 +1,13 @@
+Fakewebapi is a simple, test-only web server.
+
+The server implements the minimal calls and responses needed for high-level testing of the agent:
+
+- Successfully enroll an Agent.
+- Allow an Agent to periodically check in.
+
+
+By default the server returns an empty list of actions; the returned data can be changed at
+runtime by using the `push.sh` script, which POSTs a JSON document to be returned on the next request.
+
+Read the `push.sh` and `fetch.sh` scripts for usage information.
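For the same round-trip without the shell scripts, a small Go client can drive the fake server. This is a minimal sketch of the `push.sh` equivalent, assuming the default server address and the example file from this directory:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Equivalent of push.sh: POST an actions document to the admin endpoint.
	body, err := ioutil.ReadFile("action_example.json")
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://localhost:8080/admin/actions", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 201 Created
}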
+ diff --git a/x-pack/agent/dev-tools/cmd/fakewebapi/action_example.json b/x-pack/agent/dev-tools/cmd/fakewebapi/action_example.json new file mode 100644 index 00000000000..ab1ff1b79c7 --- /dev/null +++ b/x-pack/agent/dev-tools/cmd/fakewebapi/action_example.json @@ -0,0 +1,39 @@ +{ + "action": "checkin", + "success": true, + "actions": [ + { + "type": "POLICY_CHANGE", + "data": { + "policy": { + "id": "default", + "outputs": { + "default": { + "id": "default", + "name": "default", + "type": "elasticsearch", + "hosts": "http://localhost:9200", + "ingest_pipeline": "default" + } + }, + "streams": [ + { + "type": "metric/nginx", + "metricsets": [ + "stubstatus" + ], + "period": "10s", + "enabled": true, + "hosts": "http://127.0.0.1", + "id": "stubstatus", + "output": { + "use_output": "default" + } + } + ] + } + }, + "id": "6d22f55a-d6e8-4e52-bcaa-16dde6091c5c" + } + ] +} diff --git a/x-pack/agent/dev-tools/cmd/fakewebapi/checkin.json b/x-pack/agent/dev-tools/cmd/fakewebapi/checkin.json new file mode 100644 index 00000000000..7a7ecd4e2d1 --- /dev/null +++ b/x-pack/agent/dev-tools/cmd/fakewebapi/checkin.json @@ -0,0 +1,4 @@ +{ + "events": [] +} + diff --git a/x-pack/agent/dev-tools/cmd/fakewebapi/fetch.sh b/x-pack/agent/dev-tools/cmd/fakewebapi/fetch.sh new file mode 100755 index 00000000000..c84d305081c --- /dev/null +++ b/x-pack/agent/dev-tools/cmd/fakewebapi/fetch.sh @@ -0,0 +1,5 @@ +#!/bin/sh +APIKEY=${1:-"abc123"} +AGENTID=${2:-"agent007"} +FILE=${3:-"checkin.json"} +curl -H "Authorization: ApiKey $APIKEY" -X POST --data "@$FILE" http://localhost:8080/api/fleet/agents/$AGENTID/checkin diff --git a/x-pack/agent/dev-tools/cmd/fakewebapi/main.go b/x-pack/agent/dev-tools/cmd/fakewebapi/main.go new file mode 100644 index 00000000000..27611b78467 --- /dev/null +++ b/x-pack/agent/dev-tools/cmd/fakewebapi/main.go @@ -0,0 +1,166 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi"
+)
+
+var (
+	host   string
+	apiKey string
+	mutex  sync.Mutex
+
+	pathCheckin     = regexp.MustCompile(`^/api/fleet/agents/(.+)/checkin`)
+	checkinResponse = response{Actions: make([]action, 0), Success: true}
+)
+
+type response struct {
+	Actions []action `json:"actions"`
+	Success bool     `json:"success"`
+}
+
+type action interface{}
+
+func init() {
+	flag.StringVar(&apiKey, "apikey", "abc123", "API Key to authenticate")
+	flag.StringVar(&host, "host", "localhost:8080", "The IP and port to use for the webserver")
+}
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/api/fleet/agents/enroll", handlerEnroll)
+	mux.HandleFunc("/admin/actions", handlerAction)
+	mux.HandleFunc("/", handlerRoot)
+
+	log.Printf("Starting webserver and listening on %s", host)
+
+	listener, err := net.Listen("tcp", host)
+	if err != nil {
+		panic(err)
+	}
+	defer listener.Close()
+
+	if err := http.Serve(listener, mux); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func handlerRoot(w http.ResponseWriter, r *http.Request) {
+	if pathCheckin.MatchString(r.URL.Path) {
+		authHandler(handlerCheckin, apiKey)(w, r)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	w.Write([]byte(`{ "message": "Hello!"}`))
+	log.Println("Root hello!")
+	log.Println("Path: ", r.URL.Path)
+}
+
+func handlerEnroll(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		http.Error(w, "Bad Request", http.StatusBadRequest)
+		return
+	}
+
+	response := &fleetapi.EnrollResponse{
+		Action:  "created",
+		Success: true,
+		Item: fleetapi.EnrollItemResponse{
+			ID:                   "a4937110-e53e-11e9-934f-47a8e38a522c",
+			Active:               true,
+			PolicyID:             "default",
+			Type:                 fleetapi.PermanentEnroll,
+			EnrolledAt:           time.Now(),
+			UserProvidedMetadata: make(map[string]interface{}),
+			LocalMetadata:        make(map[string]interface{}),
+			AccessAPIKey:         apiKey,
+		},
+	}
+
+	b, err := json.Marshal(response)
+	if err != nil {
+		log.Printf("failed to enroll: %+v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+	w.Write(b)
+	log.Println("Enroll response:", string(b))
+}
+
+func handlerCheckin(w http.ResponseWriter, r *http.Request) {
+	mutex.Lock()
+	defer mutex.Unlock()
+
+	b, err := json.Marshal(checkinResponse)
+	if err != nil {
+		log.Printf("Failed to checkin, error: %+v", err)
+		http.Error(w, "Internal Server error", http.StatusInternalServerError)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	w.Write(b)
+	log.Println("Checkin response: ", string(b))
+}
+
+func handlerAction(w http.ResponseWriter, r *http.Request) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if r.Method != "POST" {
+		http.Error(w, "Bad Request", http.StatusBadRequest)
+		return
+	}
+
+	resp := response{}
+
+	var buf bytes.Buffer
+	tee := io.TeeReader(r.Body, &buf)
+
+	c, err := ioutil.ReadAll(tee)
+	if err != nil {
+		log.Printf("Failed to update the actions, error: %+v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	decoder := json.NewDecoder(&buf)
+	if err := decoder.Decode(&resp); err != nil {
+		log.Printf("Failed to update the actions, error: %+v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	checkinResponse = resp
+	w.WriteHeader(http.StatusCreated)
+	w.Write([]byte(`{ "success": true }`))
+	log.Println("Action request: ", string(c))
+}
+
+func authHandler(handler http.HandlerFunc, apiKey string) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		const key = "Authorization"
+		const prefix = "ApiKey "
+
+		v := strings.TrimPrefix(r.Header.Get(key), prefix)
+		if v != apiKey {
+			http.Error(w, "Unauthorized", http.StatusUnauthorized)
+			return
+		}
+		handler(w, r)
+	}
+}
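The checkin path is protected by the `ApiKey` header scheme that authHandler enforces. A minimal Go sketch of a client check-in, mirroring `fetch.sh` and assuming the default key and agent ID used by the scripts:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// Equivalent of fetch.sh: check in as agent007 using the default API key.
	req, err := http.NewRequest("POST",
		"http://localhost:8080/api/fleet/agents/agent007/checkin",
		strings.NewReader(`{"events": []}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "ApiKey abc123")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // actions currently queued on the server
}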
diff --git a/x-pack/agent/dev-tools/cmd/fakewebapi/push.sh b/x-pack/agent/dev-tools/cmd/fakewebapi/push.sh
new file mode 100755
index 00000000000..602df720cb1
--- /dev/null
+++ b/x-pack/agent/dev-tools/cmd/fakewebapi/push.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+FILE=${1:-"action_example.json"}
+curl -X POST --data "@$FILE" http://localhost:8080/admin/actions
diff --git a/x-pack/agent/docker-compose.yml b/x-pack/agent/docker-compose.yml
new file mode 100644
index 00000000000..1acbbbdec01
--- /dev/null
+++ b/x-pack/agent/docker-compose.yml
@@ -0,0 +1,9 @@
+version: '3'
+services:
+  testunit:
+    build:
+      context: .
+      dockerfile: Dockerfile.test
+      args:
+        GO_VERSION: ${GO_VERSION}
+    command: ["mage", "update", "check:all", "test:all"]
diff --git a/x-pack/agent/docs/agent.asciidoc b/x-pack/agent/docs/agent.asciidoc
new file mode 100644
index 00000000000..f935ec54b39
--- /dev/null
+++ b/x-pack/agent/docs/agent.asciidoc
@@ -0,0 +1,8 @@
+Agent's notes
+
+
+[[requirements]]
+== Requirements
+
+The remote Kibana version must be equal to or higher than the Agent version; if this
+requirement is not met, Kibana will refuse the connection.
diff --git a/x-pack/agent/docs/agent_configuration_example.yml b/x-pack/agent/docs/agent_configuration_example.yml
new file mode 100644
index 00000000000..08b92363921
--- /dev/null
+++ b/x-pack/agent/docs/agent_configuration_example.yml
@@ -0,0 +1,565 @@
+outputs:
+  default:
+    type: elasticsearch
+    api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
+    hosts: ["localhost:9200"]
+    ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M="
+    # Not supported at first
+    queue:
+      type: disk
+
+  long_term_storage:
+    type: elasticsearch
+    api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
+    hosts: ["localhost:9200"]
+    ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M="
+    queue:
+      type: disk
+
+  monitoring:
+    type: elasticsearch
+    api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
+    hosts: ["localhost:9200"]
+    ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M="
+
+settings.monitoring:
+  use_output: monitoring
+
+datasources:
+  # use the nginx package
+  - id?: nginx-x1
+    enabled?: true # default to true
+    title?: "This is a nice title for humans"
+    # The package this config group is coming from. On importing, we know where it belongs.
+    # The package tells the UI which application to link to.
+    package?:
+      name: epm/nginx
+      version: 1.7.0
+    namespace?: prod
+    constraints?:
+      # The constraints format is not final.
+      - os.platform: { in: "windows" }
+      - agent.version: { ">=": "8.0.0" }
+    use_output: long_term_storage
+    inputs:
+      - type: logs
+        processors?:
+        streams:
+          - id?: {id}
+            enabled?: true # default to true
+            dataset: nginx.access
+            paths: /var/log/nginx/access.log
+          - id?: {id}
+            enabled?: true # default to true
+            dataset: nginx.error
+            paths: /var/log/nginx/error.log
+      - type: nginx/metrics
+        streams:
+          - id?: {id}
+            enabled?: true # default to true
+            dataset: nginx.stub_status
+            metricset: stub_status
+
+  #################################################################################################
+  # Custom Kafka datasource
+  - id: kafka-x1
+    title: "Consume data from kafka"
+    namespace?: prod
+    use_output: long_term_storage
+    inputs:
+      - type: kafka
+        host: localhost:6566
+        streams:
+          - dataset: foo.dataset
+            topic: foo
+            processors:
+              - extract_bro_specifics
+
+
+  #################################################################################################
+  # System EPM package
+  - id?: system
+    title: Collect system information and metrics
+    package:
+      name: epm/system
+      version: 1.7.0
+    inputs:
+      - type: system/metrics
+        streams:
+          - id?: {id}
+            enabled?: false # default to true
+            metricset: cpu
+            dataset: system.cpu
+            metrics: ["percentages", "normalized_percentages"]
+            period: 10s
+          - metricset: memory
+            dataset: system.memory
+            period: 10s
+          - metricset: diskio
+            dataset: system.diskio
+            period: 10s
+          - metricset: load
+            dataset: system.load
+            period: 10s
+          - metricset: process
+            dataset: system.process
+            processes: ["firefox*"]
+            include_top_n:
+              by_cpu: 5 # include top 5 processes by CPU
+              by_memory: 5 # include top 5 processes by memory
+            period: 10s
+          - metricset: process_summary
+            dataset: system.process_summary
+            period: 10s
+          - metricset: uptime
+            dataset: system.uptime
+            period: 15m
+          - metricset: socket_summary
+            dataset: system.socket_summary
+            period: 10s
+          - metricset: filesystem
+            dataset: system.filesystem
+            period: 10s
+          - metricset: raid
+            dataset: system.raid
+            period: 10s
+          - metricset: socket
+            dataset: system.socket
+            period: 10s
+          - metricset: service
+            dataset: system.service
+            period: 10s
+          - metricset: fsstat
+            dataset: system.fsstat
+            period: 10s
+          - metricset: foo
+            dataset: system.foo
+            period: 10s
+
+
+  #################################################################################################
+  # Elasticsearch package example
+  - id?: my-endpoint
+    title: Collect Elasticsearch information
+    package:
+      name: epm/elasticsearch
+      version: 1.7.0
+    inputs:
+      - type: log
+        streams:
+          - id?: {id}
+            enabled?: true # default to true
+            dataset: elasticsearch.audit
+            paths: [/var/log/elasticsearch/*_access.log, /var/log/elasticsearch/*_audit.log]
+          - id?: {id}
+            enabled?: true
+            dataset: elasticsearch.deprecation
+            paths: [/var/log/elasticsearch/*_deprecation.log]
+          - id?: {id}
+            enabled?: true
+            dataset: elasticsearch.gc
+            paths: [/var/log/elasticsearch/*_gc.log, /var/log/elasticsearch/*_gc.log.[0-9]*]
+          - id?: {id}
+            enabled?: true
+            dataset: elasticsearch.server
+            paths: [/var/log/elasticsearch/*.log]
+          - id?: {id}
+            enabled?: true
+            dataset: elasticsearch.slowlog
+            paths: [/var/log/elasticsearch/*_index_search_slowlog.log, /var/log/elasticsearch/*_index_indexing_slowlog.log]
+      - type: elasticsearch/metrics
+        hosts: ["http://localhost:9200"]
+        # api_key: xxxx
+        # username: elastic
+        # password: changeme
+        # ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+        # ssl.ca_sha256: BxZ...
+        # ssl.certificate: ...
+        # ssl.key: ...
+        xpack.enabled: true
+        streams:
+          - id?: {id}
+            metricset: ccr
+            dataset: elasticsearch.ccr
+            period: 10s
+          - id?: {id}
+            metricset: cluster_stats
+            dataset: elasticsearch.cluster_stats
+            period: 10s
+          - id?: {id}
+            metricset: enrich
+            dataset: elasticsearch.enrich
+            period: 10s
+          - id?: {id}
+            metricset: index
+            dataset: elasticsearch.index
+            period: 10s
+          - id?: {id}
+            metricset: index_recovery
+            dataset: elasticsearch.index_recovery
+            active_only: true
+            period: 10s
+          - id?: {id}
+            metricset: ml_jobs
+            dataset: elasticsearch.ml_jobs
+            period: 10s
+          - id?: {id}
+            metricset: node_stats
+            dataset: elasticsearch.node_stats
+            period: 10s
+          - id?: {id}
+            metricset: shard
+            dataset: elasticsearch.shard
+            period: 10s
+
+  #################################################################################################
+  # AWS module
+  - id?: my-aws
+    title: Collect AWS
+    package:
+      name: epm/aws
+      version: 1.7.0
+    inputs:
+      # Looking at the AWS modules, I believe each fileset needs to be in its own bucket?
+      - type: s3
+        credential_profile_name: fb-aws
+        #shared_credential_file: /etc/filebeat/aws_credentials
+        streams:
+          - id?: {id}
+            dataset: aws.s3
+            queue_url: https://sqs.myregion.amazonaws.com/123456/sqs-queue
+          - id?: {id}
+            dataset: aws.s3access
+            queue_url: https://sqs.myregion.amazonaws.com/123456/sqs-queue
+          - id?: {id}
+            dataset: aws.vpcflow
+            queue_url: https://sqs.myregion.amazonaws.com/123456/sqs-queue
+          - id?: {id}
+            dataset: aws.cloudtrail
+            queue_url: https://sqs.myregion.amazonaws.com/123456/sqs-queue
+      - type: aws/metrics
+        access_key_id: '${AWS_ACCESS_KEY_ID:""}'
+        secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}'
+        session_token: '${AWS_SESSION_TOKEN:""}'
+        #credential_profile_name: test-mb
+        #shared_credential_file: ...
+        streams:
+          - id?: {id}
+            metricset: usage
+            dataset: aws.usage
+            period: 5m
+          - id?: {id}
+            metricset: cloudwatch
+            dataset: aws.cloudwatch
+            period: 5m
+            name: ["CPUUtilization", "DiskWriteOps"]
+            tags.resource_type_filter: ec2:instance
+            #dimensions:
+            #  - name: InstanceId
+            #    value: i-0686946e22cf9494a
+            statistic: ["Average", "Maximum"]
+          - id?: {id}
+            metricset: ebs
+            dataset: aws.ebs
+            period: 5m
+          - id?: {id}
+            metricset: ec2
+            dataset: aws.ec2
+            period: 5m
+          - id?: {id}
+            metricset: elb
+            dataset: aws.elb
+            period: 5m
+          - id?: {id}
+            metricset: sns
+            dataset: aws.sns
+            period: 5m
+          - id?: {id}
+            metricset: sqs
+            dataset: aws.sqs
+            period: 5m
+          - id?: {id}
+            metricset: rds
+            dataset: aws.rds
+            period: 5m
+          - id?: {id}
+            metricset: billing
+            dataset: aws.billing
+            period: 12h
+          - id?: {id}
+            metricset: s3_daily_storage
+            dataset: aws.s3_daily_storage
+            period: 24h
+          - id?: {id}
+            metricset: s3_request
+            dataset: aws.s3_request
+            period: 24h
+
+
+  #################################################################################################
+  # Kubernetes
+  - id?: my-kubernetes
+    title: Collect Kubernetes
+    package:
+      name: epm/kubernetes
+      version: 1.7.0
+    inputs:
+      - type: kubernetes-node/metrics
+        hosts: ["localhost:10250"]
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+        ssl.certificate_authorities:
+          - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+        # username: "user"
+        # password: "secret"
+        # If kube_config is not set, the KUBECONFIG environment variable will be checked
+        # and if not present it will fall back to InCluster
+        # kube_config: ~/.kube/config
+        streams:
+          - id?: {id}
+            metricset: node
+            dataset: kubernetes.node
+            period: 10s
+          - id?: {id}
+            metricset: system
+            dataset: kubernetes.system
+            period: 10s
+          - id?: {id}
+            metricset: pod
+            dataset: kubernetes.pod
+            period: 10s
+          - id?: {id}
+            metricset: container
+            dataset: kubernetes.container
+            period: 10s
+          - id?: {id}
+            metricset: event
+            dataset: kubernetes.event
+            period: 10s
+      - type: kubernetes-state/metrics
+        hosts: ["kube-state-metrics:8080"]
+        streams:
+          - id?: {id}
+            metricset: state_node
+            dataset: kubernetes.node
+            period: 10s
+          - id?: {id}
+            metricset: state_deployment
+            dataset: kubernetes.deployment
+            period: 10s
+          - id?: {id}
+            metricset: state_replicaset
+            dataset: kubernetes.replicaset
+            period: 10s
+          - id?: {id}
+            metricset: state_statefulset
+            dataset: kubernetes.statefulset
+            period: 10s
+          - id?: {id}
+            metricset: state_pod
+            dataset: kubernetes.pod
+            period: 10s
+          - id?: {id}
+            metricset: state_container
+            dataset: kubernetes.container
+            period: 10s
+          - id?: {id}
+            metricset: state_cronjob
+            dataset: kubernetes.cronjob
+            period: 10s
+          - id?: {id}
+            metricset: state_resourcequota
+            dataset: kubernetes.resourcequota
+            period: 10s
+          - id?: {id}
+            metricset: state_service
+            dataset: kubernetes.service
+            period: 10s
+          - id?: {id}
+            metricset: state_persistentvolume
+            dataset: kubernetes.persistentvolume
+            period: 10s
+          - id?: {id}
+            metricset: state_persistentvolumeclaim
+            dataset: kubernetes.persistentvolumeclaim
+            period: 10s
+
+  #################################################################################################
+  # Docker
+  - id?: my-docker
+    title: Collect docker
+    package:
+      name: epm/docker
+      version: 1.7.0
+    inputs:
+      - type: docker/metrics
+        hosts: ["localhost:10250"]
+        #labels.dedot: false
+
+        # To connect to Docker over TLS you must specify a client and CA certificate.
+        #ssl:
+        #  certificate_authority: "/etc/pki/root/ca.pem"
+        #  certificate: "/etc/pki/client/cert.pem"
+        #  key: "/etc/pki/client/cert.key"
+        streams:
+          - id?: {id}
+            metricset: container
+            dataset: docker.container
+            period: 10s
+          - id?: {id}
+            metricset: cpu
+            dataset: docker.cpu
+            period: 10s
+          - id?: {id}
+            metricset: diskio
+            dataset: docker.diskio
+            period: 10s
+          - id?: {id}
+            metricset: event
+            dataset: docker.event
+            period: 10s
+          - id?: {id}
+            metricset: healthcheck
+            dataset: docker.healthcheck
+            period: 10s
+          - id?: {id}
+            metricset: info
+            dataset: docker.info
+            period: 10s
+          - id?: {id}
+            metricset: memory
+            dataset: docker.memory
+            period: 10s
+          - id?: {id}
+            metricset: network
+            dataset: docker.network
+            period: 10s
+
+#################################################################################################
+### Suricata
+#
+  - id?: suricata-x1
+    title: Suricata's data
+    namespace?: "abc"
+    package:
+      name: suricata
+      version: x.x.x
+    inputs:
+      - type: log
+        streams:
+          - id?: {id}
+            type: "typeX"
+            dataset: suricata.logs
+            path: /var/log/suricata/eve.json
+
+#################################################################################################
+### suggestion 1
+  - id?: myendpoint-x1
+    title: Endpoint configuration
+    namespace?: "canada"
+    package:
+      name: endpoint
+      version: xxx
+    inputs:
+      - type: endpoint # Reserved key word
+        streams:
+          - type: malware
+            detect: true
+            prevent: false
+            notify_user: false
+            threshold: recommended
+            platform: windows
+
+          - type: eventing
+            api: true
+            clr: false
+            dll_and_driver_load: false
+            dns: true
+            file: false
+            platform: windows
+
+          - type: malware
+            detect: true
+            prevent: false
+            notify_user: false
+            threshold: recommended
+            platform: mac
+
+          - type: eventing
+            api: true
+            clr: false
+            dll_and_driver_load: false
+            dns: true
+            file: false
+            platform: mac
+
+          - type: malware
+            detect: true
+            prevent: false
+            notify_user: false
+            threshold: recommended
+            platform: linux
+
+          - type: eventing
+            api: true
+            clr: false
+            dll_and_driver_load: false
+            dns: true
+            file: false
+            platform: linux
+
+#################################################################################################
+### suggestion 2
+  - id?: myendpoint-1
+    title: Endpoint configuration
+    namespace?: "canada"
+    package:
+      name: epm/endpoint # This establishes the link with the package and will allow linking it to the endpoint app.
+      version: xxx
+    inputs:
+      - type: endpoint # Reserved key word
+        windows:
+          eventing:
+            api: true
+            clr: false
+            dll_and_driver_load: false
+            dns: true
+            ...
+            file: false
+          malware:
+            detect: true
+            prevent: false
+            notify_user: false
+            threshold: recommended
+        mac:
+          eventing:
+            file: true
+            network: false
+            process: false
+            ...
+          malware:
+            detect: true
+            prevent: false
+            notify_user: false
+            threshold: recommended
+        linux:
+          eventing:
+            file: true
+            network: false
+            process: false
diff --git a/x-pack/agent/magefile.go b/x-pack/agent/magefile.go
new file mode 100644
index 00000000000..d653b3bdc57
--- /dev/null
+++ b/x-pack/agent/magefile.go
@@ -0,0 +1,473 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build mage
+
+package main
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	devtools "github.com/elastic/beats/v7/dev-tools/mage"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/common"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/release"
+
+	// mage:import
+	_ "github.com/elastic/beats/v7/dev-tools/mage/target/common"
+
+	// mage:import
+	_ "github.com/elastic/beats/v7/dev-tools/mage/target/docs"
+)
+
+const (
+	goLintRepo     = "golang.org/x/lint/golint"
+	goLicenserRepo = "github.com/elastic/go-licenser"
+	buildDir       = "build"
+	metaDir        = "_meta"
+)
+
+// Aliases for commands required by the master makefile.
+var Aliases = map[string]interface{}{
+	"build": Build.All,
+}
+
+func init() {
+	common.RegisterCheckDeps(Update, Check.All)
+
+	devtools.BeatDescription = "Agent manages other beats based on configuration provided."
+	devtools.BeatLicense = "Elastic License"
+}
+
+// Default is set to build everything by default.
+var Default = Build.All
+
+// Build namespace used to build binaries.
+type Build mg.Namespace
+
+// Test namespace contains all the tasks for testing the project.
+type Test mg.Namespace
+
+// Check namespace contains tasks related to checking code quality.
+type Check mg.Namespace
+
+// Prepare contains tasks related to bootstrapping the environment or getting information about it.
+type Prepare mg.Namespace
+
+// Format contains tasks that automatically format the code.
+type Format mg.Namespace
+
+// Env returns information about the environment.
+func (Prepare) Env() {
+	mg.Deps(Mkdir("build"), Build.GenerateConfig)
+	RunGo("version")
+	RunGo("env")
+}
+
+// InstallGoLicenser installs go-licenser to check the license headers of files.
+func (Prepare) InstallGoLicenser() error {
+	return GoGet(goLicenserRepo)
+}
+
+// InstallGoLint installs golint for the code.
+func (Prepare) InstallGoLint() error {
+	return GoGet(goLintRepo)
+}
+
+// All builds everything for the current project.
+func (Build) All() {
+	mg.Deps(Build.Binary)
+}
+
+// GenerateConfig generates the configuration from _meta/agent.yml.
+func (Build) GenerateConfig() error {
+	mg.Deps(Mkdir(buildDir))
+	return sh.Copy(filepath.Join(buildDir, "agent.yml"), filepath.Join(metaDir, "agent.yml"))
+}
+
+// GolangCrossBuildOSS builds the Beat binary inside of the golang-builder.
+// Do not use directly, use crossBuild instead.
+func GolangCrossBuildOSS() error {
+	params := devtools.DefaultGolangCrossBuildArgs()
+	params.InputFiles = []string{"cmd/agent/agent.go"}
+	return devtools.GolangCrossBuild(params)
+}
+
+// GolangCrossBuild builds the Beat binary inside of the golang-builder.
+// Do not use directly, use crossBuild instead.
+func GolangCrossBuild() error {
+	params := devtools.DefaultGolangCrossBuildArgs()
+	params.InputFiles = []string{"cmd/agent/agent.go"}
+	params.OutputDir = "build/golang-crossbuild"
+	if err := devtools.GolangCrossBuild(params); err != nil {
+		return err
+	}
+
+	// TODO: no OSS bits just yet
+	// return GolangCrossBuildOSS()
+
+	return nil
+}
+
+// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
+func BuildGoDaemon() error {
+	return devtools.BuildGoDaemon()
+}
+
+// BinaryOSS builds the fleet artifact.
+func (Build) BinaryOSS() error {
+	mg.Deps(Prepare.Env)
+	return RunGo(
+		"build",
+		"-o", filepath.Join(buildDir, "agent-oss"),
+		"-ldflags", flags(),
+		"cmd/agent/agent.go",
+	)
+}
+
+// Binary builds the fleet artifact.
+func (Build) Binary() error {
+	mg.Deps(Prepare.Env)
+	return RunGo(
+		"build",
+		"-o", filepath.Join(buildDir, "agent"),
+		"-ldflags", flags(),
+		"cmd/agent/agent.go",
+	)
+}
+
+// Dev makes a special build with the Dev tags.
+func (Build) Dev() error {
+	mg.Deps(Prepare.Env)
+	return RunGo(
+		"build",
+		"-tags", "dev",
+		"-o", filepath.Join(buildDir, "agent"),
+		"-ldflags", flags(),
+		"cmd/agent/agent.go",
+	)
+}
+
+// Clean cleans up the dev environment.
+func (Build) Clean() {
+	os.RemoveAll(buildDir)
+}
+
+// TestBinaries builds the required binaries for the test suite.
+func (Build) TestBinaries() error {
+	p := filepath.Join("pkg", "agent", "operation", "tests", "scripts")
+
+	return combineErr(
+		RunGo("build", "-o", filepath.Join(p, "configurable-1.0-darwin-x86", "configurable"), filepath.Join(p, "configurable-1.0-darwin-x86", "main.go")),
+		RunGo("build", "-o", filepath.Join(p, "configurablebyfile-1.0-darwin-x86", "configurablebyfile"), filepath.Join(p, "configurablebyfile-1.0-darwin-x86", "main.go")),
+	)
+}
+
+// All runs all the code checks.
+func (Check) All() {
+	mg.SerialDeps(Check.License, Check.GoLint)
+}
+
+// GoLint runs the code through the linter.
+func (Check) GoLint() error {
+	mg.Deps(Prepare.InstallGoLint)
+	packagesString, err := sh.Output("go", "list", "./...")
+	if err != nil {
+		return err
+	}
+
+	packages := strings.Split(packagesString, "\n")
+	for _, pkg := range packages {
+		if strings.Contains(pkg, "/vendor/") {
+			continue
+		}
+
+		if e := sh.RunV("golint", "-set_exit_status", pkg); e != nil {
+			err = multierror.Append(err, e)
+		}
+	}
+
+	return err
+}
+
+// License makes sure that all the Golang files have the appropriate license header.
+func (Check) License() error {
+	mg.Deps(Prepare.InstallGoLicenser)
+	// exclude copied files until we come up with a better option
+	return combineErr(
+		sh.RunV("go-licenser", "-d", "-license", "Elastic"),
+	)
+}
+
+// Changes runs git status --porcelain and returns an error if we have changes or uncommitted files.
+func (Check) Changes() error {
+	out, err := sh.Output("git", "status", "--porcelain")
+	if err != nil {
+		return errors.New("cannot retrieve the git status")
+	}
+
+	if len(out) != 0 {
+		fmt.Fprintln(os.Stderr, "Changes:")
+		fmt.Fprintln(os.Stderr, out)
+		return fmt.Errorf("uncommitted changes")
+	}
+	return nil
+}
+
+// All runs all the tests.
+func (Test) All() {
+	mg.SerialDeps(Test.Unit)
+}
+
+// Unit runs all the unit tests.
+func (Test) Unit() error {
+	mg.Deps(Prepare.Env, Build.TestBinaries)
+	return RunGo("test", "-race", "-v", "-coverprofile", filepath.Join(buildDir, "coverage.out"), "./...")
+}
+
+// Coverage takes the coverage report from running all the tests and displays the results in the browser.
+func (Test) Coverage() error {
+	mg.Deps(Prepare.Env, Build.TestBinaries)
+	return RunGo("tool", "cover", "-html="+filepath.Join(buildDir, "coverage.out"))
+}
+
+// All formats all the code automatically.
+func (Format) All() {
+	mg.SerialDeps(Format.License)
+}
+
+// License applies the right license header.
+func (Format) License() error {
+	mg.Deps(Prepare.InstallGoLicenser)
+	return combineErr(
+		sh.RunV("go-licenser", "-license", "Elastic"),
+	)
+}
+
+// Package packages the Beat for distribution.
+// Use SNAPSHOT=true to build snapshots.
+// Use PLATFORMS to control the target platforms.
+// Use VERSION_QUALIFIER to control the version qualifier.
+func Package() {
+	start := time.Now()
+	defer func() { fmt.Println("package ran for", time.Since(start)) }()
+
+	version, found := os.LookupEnv("BEAT_VERSION")
+	if !found {
+		version = release.Version()
+	}
+
+	packedBeats := []string{"filebeat", "metricbeat"}
+	requiredPackages := []string{
+		"darwin-x86_64.tar.gz",
+		"linux-x86.tar.gz",
+		"linux-x86_64.tar.gz",
+		"windows-x86.zip",
+		"windows-x86_64.zip",
+	}
+
+	for _, b := range packedBeats {
+		pwd, err := filepath.Abs(filepath.Join("..", b))
+		if err != nil {
+			panic(err)
+		}
+
+		if requiredPackagesPresent(pwd, b, version, requiredPackages) {
+			continue
+		}
+
+		cmd := exec.Command("mage", "package")
+		cmd.Dir = pwd
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		cmd.Env = append(os.Environ(), fmt.Sprintf("PWD=%s", pwd), "AGENT_PACKAGING=on")
+
+		if err := cmd.Run(); err != nil {
+			panic(err)
+		}
+	}
+
+	// package agent
+	devtools.UseElasticAgentPackaging()
+
+	mg.Deps(Update)
+	mg.Deps(CrossBuild, CrossBuildGoDaemon)
+	mg.SerialDeps(devtools.Package)
+}
+
+func requiredPackagesPresent(basePath, beat, version string, requiredPackages []string) bool {
+	for _, pkg := range requiredPackages {
+		packageName := fmt.Sprintf("%s-%s-%s", beat, version, pkg)
+		path := filepath.Join(basePath, "build", "distributions", packageName)
+
+		if _, err := os.Stat(path); err != nil {
+			fmt.Printf("Package '%s' does not exist on path: %s\n", packageName, path)
+			return false
+		}
+	}
+	return true
+}
+
+// TestPackages tests the generated packages (i.e. file modes, owners, groups).
+func TestPackages() error {
+	return devtools.TestPackages()
+}
+
+// RunGo runs the go command and outputs the feedback to stdout and stderr.
+func RunGo(args ...string) error {
+	return sh.RunV(mg.GoCmd(), args...)
+}
+
+// GoGet fetches a remote dependency.
+func GoGet(link string) error {
+	_, err := sh.Exec(map[string]string{"GO111MODULE": "off"}, os.Stdout, os.Stderr, "go", "get", link)
+	return err
+}
+
+// Mkdir returns a function that creates a directory.
+func Mkdir(dir string) func() error {
+	return func() error {
+		if err := os.MkdirAll(dir, 0700); err != nil {
+			return fmt.Errorf("failed to create directory: %v, error: %+v", dir, err)
+		}
+		return nil
+	}
+}
+
+func commitID() string {
+	commitID, err := sh.Output("git", "rev-parse", "--short", "HEAD")
+	if err != nil {
+		return "cannot retrieve hash"
+	}
+	return commitID
+}
+
+func flags() string {
+	ts := time.Now().Format(time.RFC3339)
+	commitID := commitID()
+
+	return fmt.Sprintf(
+		`-X "github.com/elastic/beats/v7/x-pack/agent/pkg/release.buildTime=%s" -X "github.com/elastic/beats/v7/x-pack/agent/pkg/release.commit=%s"`,
+		ts,
+		commitID,
+	)
+}
+
+// Update regenerates the config, the program specs, and the embedded fleet config.
+func Update() {
+	mg.SerialDeps(Config, BuildSpec, BuildFleetCfg)
+}
+
+// CrossBuild cross-builds the beat for all target platforms.
+func CrossBuild() error {
+	return devtools.CrossBuild()
+}
+
+// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker.
+func CrossBuildGoDaemon() error {
+	return devtools.CrossBuildGoDaemon()
+}
+
+// Config generates the short/reference/docker configurations.
+func Config() {
+	mg.Deps(configYML)
+}
+
+// BuildSpec makes sure that all the supported program specs are built into the binary.
+func BuildSpec() error {
+	// go run x-pack/agent/dev-tools/cmd/buildspec/buildspec.go --in x-pack/agent/spec/*.yml --out x-pack/agent/pkg/agent/program/supported.go
+	goF := filepath.Join("dev-tools", "cmd", "buildspec", "buildspec.go")
+	in := filepath.Join("spec", "*.yml")
+	out := filepath.Join("pkg", "agent", "program", "supported.go")
+
+	fmt.Printf(">> Buildspec from %s to %s\n", in, out)
+	return RunGo("run", goF, "--in", in, "--out", out)
+}
+
+func configYML() error {
+	return devtools.Config(devtools.AllConfigTypes, ConfigFileParams(), ".")
+}
+
+// ConfigFileParams returns the parameters for generating OSS config.
+func ConfigFileParams() devtools.ConfigFileParams {
+	return devtools.ConfigFileParams{
+		ShortParts: []string{
+			devtools.XPackBeatDir("_meta/common.p1.yml"),
+			devtools.XPackBeatDir("_meta/common.p2.yml"),
+		},
+		ReferenceParts: []string{
+			devtools.XPackBeatDir("_meta/common.reference.p1.yml"),
+			devtools.XPackBeatDir("_meta/common.reference.p2.yml"),
+		},
+		DockerParts: []string{
+			devtools.XPackBeatDir("_meta/agent.docker.yml"),
+		},
+	}
+}
+
+// fieldDocs generates docs/fields.asciidoc containing all fields
+// (including x-pack).
+func fieldDocs() error {
+	inputs := []string{
+		devtools.OSSBeatDir("input"),
+		devtools.XPackBeatDir("input"),
+	}
+	output := devtools.CreateDir("build/fields/fields.all.yml")
+	if err := devtools.GenerateFieldsYAMLTo(output, inputs...); err != nil {
+		return err
+	}
+	return devtools.Docs.FieldDocs(output)
+}
+
+func combineErr(errors ...error) error {
+	var e error
+	for _, err := range errors {
+		if err == nil {
+			continue
+		}
+		e = multierror.Append(e, err)
+	}
+	return e
+}
+
+// GoTestUnit is an alias for goUnitTest.
+func GoTestUnit() {
+	mg.Deps(unittest.GoUnitTest)
+}
+
+// UnitTest performs unit tests on the agent.
+func UnitTest() {
+	mg.Deps(Test.All)
+}
+
+// IntegTest calls go integtest; we don't have python integration tests so far.
+// TODO: call the integtest mage package when python tests are available.
+func IntegTest() {
+	f, err := os.Create(filepath.Join("build", "TEST-go-integration.out"))
+	if err == nil {
+		f.Close()
+	}
+}
+
+// BuildFleetCfg embeds the default fleet configuration as part of the binary.
+func BuildFleetCfg() error {
+	goF := filepath.Join("dev-tools", "cmd", "buildfleetcfg", "buildfleetcfg.go")
+	in := filepath.Join("_meta", "agent.fleet.yml")
+	out := filepath.Join("pkg", "agent", "application", "configuration_embed.go")
+
+	fmt.Printf(">> BuildFleetCfg %s to %s\n", in, out)
+	return RunGo("run", goF, "--in", in, "--out", out)
+}
+
+// Fields is a placeholder method to fix the Windows build.
+func Fields() error {
+	return nil
+}
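With these targets in place, the typical local workflow mirrors what the docker-compose test service runs. Target names below follow mage's default lowercase namespace:target naming, so they are derived from the code above rather than documented elsewhere:

    mage update          # regenerate config, program specs, and fleet config
    mage check:all       # license headers and linting
    mage test:all        # unit tests
    mage build:binary    # build the agent binary into build/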
diff --git a/x-pack/agent/main.go b/x-pack/agent/main.go
new file mode 100644
index 00000000000..db421a72f16
--- /dev/null
+++ b/x-pack/agent/main.go
@@ -0,0 +1,24 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package main
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/cmd"
+)
+
+// Sets up and runs the agent.
+func main() {
+	rand.Seed(time.Now().UnixNano())
+	command := cmd.NewCommand()
+	if err := command.Execute(); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/x-pack/agent/main_test.go b/x-pack/agent/main_test.go
new file mode 100644
index 00000000000..cdced7ea677
--- /dev/null
+++ b/x-pack/agent/main_test.go
@@ -0,0 +1,29 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package main
+
+// This file is mandatory as otherwise the agent.test binary is not generated correctly.
+import (
+	"flag"
+	"testing"
+
+	// Just using this as a placeholder.
+	"github.com/elastic/beats/v7/x-pack/filebeat/cmd"
+)
+
+var systemTest *bool
+
+func init() {
+	testing.Init()
+	systemTest = flag.Bool("systemTest", false, "Set to true when running system tests")
+	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest"))
+	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile"))
+}
+
+// TestSystem is started when the test binary is started. It only calls main.
+func TestSystem(t *testing.T) {
+	if *systemTest {
+		main()
+	}
+}
diff --git a/x-pack/agent/pkg/agent/application/action_dispatcher.go b/x-pack/agent/pkg/agent/application/action_dispatcher.go
new file mode 100644
index 00000000000..e37c54683bd
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/action_dispatcher.go
@@ -0,0 +1,113 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi"
+)
+
+type action = fleetapi.Action
+
+type actionHandler interface {
+	Handle(ctx context.Context, a action, acker fleetAcker) error
+}
+
+type actionHandlers map[string]actionHandler
+
+type actionDispatcher struct {
+	ctx      context.Context
+	log      *logger.Logger
+	handlers actionHandlers
+	def      actionHandler
+}
+
+func newActionDispatcher(ctx context.Context, log *logger.Logger, def actionHandler) (*actionDispatcher, error) {
+	var err error
+	if log == nil {
+		log, err = logger.New()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if def == nil {
+		return nil, errors.New("missing default handler")
+	}
+
+	return &actionDispatcher{
+		ctx:      ctx,
+		log:      log,
+		handlers: make(actionHandlers),
+		def:      def,
+	}, nil
+}
+
+func (ad *actionDispatcher) Register(a action, handler actionHandler) error {
+	k := ad.key(a)
+	_, ok := ad.handlers[k]
+	if ok {
+		return fmt.Errorf("action with type %T is already registered", a)
+	}
+	ad.handlers[k] = handler
+	return nil
+}
+
+func (ad *actionDispatcher) MustRegister(a action, handler actionHandler) {
+	err := ad.Register(a, handler)
+	if err != nil {
+		panic("could not register action, error: " + err.Error())
+	}
+}
+
+func (ad *actionDispatcher) key(a action) string {
+	return reflect.TypeOf(a).String()
+}
+
+func (ad *actionDispatcher) Dispatch(acker fleetAcker, actions ...action) error {
+	if len(actions) == 0 {
+		ad.log.Debug("No action to dispatch")
+		return nil
+	}
+
+	ad.log.Debugf(
+		"Dispatch %d actions of types: %s",
+		len(actions),
+		strings.Join(detectTypes(actions), ", "),
+	)
+
+	for _, action := range actions {
+		if err := ad.dispatchAction(action, acker); err != nil {
+			ad.log.Debugf("Failed to dispatch action '%+v', error: %+v", action, err)
+			return err
+		}
+		ad.log.Debugf("Successfully dispatched action: '%+v'", action)
+	}
+
+	return acker.Commit(ad.ctx)
+}
+
+func (ad *actionDispatcher) dispatchAction(a action, acker fleetAcker) error {
+	handler, found := ad.handlers[ad.key(a)]
+	if !found {
+		return ad.def.Handle(ad.ctx, a, acker)
+	}
+
+	return handler.Handle(ad.ctx, a, acker)
+}
+
+func detectTypes(actions []action) []string {
+	str := make([]string, len(actions))
+	for idx, action := range actions {
+		str[idx] = reflect.TypeOf(action).String()
+	}
+	return str
+}
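The dispatcher is driven entirely through Register/MustRegister and Dispatch. A minimal sketch of the calling pattern from inside the application package follows; the handler types (`handlerUnknown`, `handlerPolicyChange`) are illustrative assumptions, while the dispatcher API and `fleetapi.ActionPolicyChange` come from the code above and below.

// Sketch (same package): wire the dispatcher; handler types are assumptions.
func exampleDispatch(ctx context.Context, log *logger.Logger, acker fleetAcker, actions ...action) error {
	// Unknown action types fall through to the default handler.
	d, err := newActionDispatcher(ctx, log, &handlerUnknown{})
	if err != nil {
		return err
	}

	// Route POLICY_CHANGE actions to their dedicated handler.
	d.MustRegister(&fleetapi.ActionPolicyChange{}, &handlerPolicyChange{})

	// Dispatch handles each action in order and commits the acker on success.
	return d.Dispatch(acker, actions...)
}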
diff --git a/x-pack/agent/pkg/agent/application/action_dispatcher_test.go b/x-pack/agent/pkg/agent/application/action_dispatcher_test.go
new file mode 100644
index 00000000000..dab1ac776e7
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/action_dispatcher_test.go
@@ -0,0 +1,101 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+type mockHandler struct {
+	received action
+	called   bool
+	err      error
+}
+
+func (h *mockHandler) Handle(_ context.Context, a action, _ fleetAcker) error {
+	h.called = true
+	h.received = a
+	return h.err
+}
+
+type mockAction struct{}
+
+func (m *mockAction) ID() string     { return "mockAction" }
+func (m *mockAction) Type() string   { return "mockAction" }
+func (m *mockAction) String() string { return "mockAction" }
+
+type mockActionUnknown struct{}
+
+func (m *mockActionUnknown) ID() string     { return "mockActionUnknown" }
+func (m *mockActionUnknown) Type() string   { return "mockActionUnknown" }
+func (m *mockActionUnknown) String() string { return "mockActionUnknown" }
+
+type mockActionOther struct{}
+
+func (m *mockActionOther) ID() string     { return "mockActionOther" }
+func (m *mockActionOther) Type() string   { return "mockActionOther" }
+func (m *mockActionOther) String() string { return "mockActionOther" }
+
+func TestActionDispatcher(t *testing.T) {
+	ack := newNoopAcker()
+
+	t.Run("Successfully dispatches multiple events", func(t *testing.T) {
+		def := &mockHandler{}
+		d, err := newActionDispatcher(context.Background(), nil, def)
+		require.NoError(t, err)
+
+		success1 := &mockHandler{}
+		success2 := &mockHandler{}
+
+		d.Register(&mockAction{}, success1)
+		d.Register(&mockActionOther{}, success2)
+
+		action1 := &mockAction{}
+		action2 := &mockActionOther{}
+
+		err = d.Dispatch(ack, action1, action2)
+
+		require.NoError(t, err)
+
+		require.True(t, success1.called)
+		require.Equal(t, action1, success1.received)
+
+		require.True(t, success2.called)
+		require.Equal(t, action2, success2.received)
+
+		require.False(t, def.called)
+		require.Nil(t, def.received)
+	})
+
+	t.Run("Unknown actions are caught by the default handler", func(t *testing.T) {
+		def := &mockHandler{}
+		d, err := newActionDispatcher(context.Background(), nil, def)
+		require.NoError(t, err)
+
+		action := &mockActionUnknown{}
+		err = d.Dispatch(ack, action)
+		require.NoError(t, err)
+
+		require.True(t, def.called)
+		require.Equal(t, action, def.received)
+	})
+
+	t.Run("Cannot register two handlers for the same action", func(t *testing.T) {
+		success1 := &mockHandler{}
+		success2 := &mockHandler{}
+
+		def := &mockHandler{}
+		d, err := newActionDispatcher(context.Background(), nil, def)
+		require.NoError(t, err)
+
+		err = d.Register(&mockAction{}, success1)
+		require.NoError(t, err)
+
+		err = d.Register(&mockAction{}, success2)
+		require.Error(t, err)
+	})
}
diff --git a/x-pack/agent/pkg/agent/application/action_store.go b/x-pack/agent/pkg/agent/application/action_store.go
new file mode 100644
index 00000000000..72fddbed399
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/action_store.go
@@ -0,0 +1,157 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"context"
+	"fmt"
+
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi"
+)
+
+// actionStore receives multiple actions to persist to disk; the implementation of the store only
The store will only keep the +// last good action on disk, we assume that the action is added to the store after it was ACK with +// Fleet. The store is not threadsafe. +type actionStore struct { + log *logger.Logger + store storeLoad + dirty bool + action action +} + +func newActionStore(log *logger.Logger, store storeLoad) (*actionStore, error) { + // If the store exists we will read it, if any errors is returned we assume we do not have anything + // persisted and we return an empty store. + reader, err := store.Load() + if err != nil { + return &actionStore{log: log, store: store}, nil + } + + var action actionPolicyChangeSerializer + + dec := yaml.NewDecoder(reader) + if err := dec.Decode(&action); err != nil { + return nil, err + } + + apc := fleetapi.ActionPolicyChange(action) + + return &actionStore{ + log: log, + store: store, + action: &apc, + }, nil +} + +// Add is only taking care of ActionPolicyChange for now and will only keep the last one it receive, +// any other type of action will be silently ignored. +func (s *actionStore) Add(a action) { + switch v := a.(type) { + case *fleetapi.ActionPolicyChange: + // Only persist the action if the action is different. + if s.action != nil && s.action.ID() == v.ID() { + return + } + s.dirty = true + s.action = a + } +} + +func (s *actionStore) Save() error { + defer func() { s.dirty = false }() + if !s.dirty { + return nil + } + + apc, ok := s.action.(*fleetapi.ActionPolicyChange) + if !ok { + return fmt.Errorf("incompatible type, expected ActionPolicyChange and received %T", s.action) + } + + serialize := actionPolicyChangeSerializer(*apc) + + reader, err := yamlToReader(&serialize) + if err != nil { + return err + } + + if err := s.store.Save(reader); err != nil { + return err + } + s.log.Debugf("save on disk action policy change: %+v", s.action) + return nil +} + +// Actions returns a slice of action to execute in order, currently only a action policy change is +// persisted. +func (s *actionStore) Actions() []action { + if s.action == nil { + return []action{} + } + + return []action{s.action} +} + +// actionPolicyChangeSerializer is a struct that add YAML serialization, I don't think serialization +// is a concern of the fleetapi package. I went this route so I don't have to do much refactoring. +// +// There are four ways to achieve the same results: +// 1. We create a second struct that map the existing field. +// 2. We add the serialization in the fleetapi. +// 3. We move the actual action type outside of the actual fleetapi package. +// 4. We have two sets of type. +// +// This could be done in a refactoring. +type actionPolicyChangeSerializer struct { + ActionID string `yaml:"action_id"` + ActionType string `yaml:"action_type"` + Policy map[string]interface{} `yaml:"policy"` +} + +// Add a guards between the serializer structs and the original struct. +var _ actionPolicyChangeSerializer = actionPolicyChangeSerializer(fleetapi.ActionPolicyChange{}) + +// actionStoreAcker wraps an existing acker and will send any acked event to the action store, +// its up to the action store to decide if we need to persist the event for future replay or just +// discard the event. 
+type actionStoreAcker struct { + acker fleetAcker + store *actionStore +} + +func (a *actionStoreAcker) Ack(ctx context.Context, action fleetapi.Action) error { + if err := a.acker.Ack(ctx, action); err != nil { + return err + } + a.store.Add(action) + return a.store.Save() +} + +func (a *actionStoreAcker) Commit(ctx context.Context) error { + return nil +} + +func newActionStoreAcker(acker fleetAcker, store *actionStore) *actionStoreAcker { + return &actionStoreAcker{acker: acker, store: store} +} + +func replayActions( + log *logger.Logger, + dispatcher dispatcher, + acker fleetAcker, + actions ...action, +) error { + log.Info("restoring current policy from disk") + + if err := dispatcher.Dispatch(acker, actions...); err != nil { + return err + } + + return nil +} diff --git a/x-pack/agent/pkg/agent/application/action_store_test.go b/x-pack/agent/pkg/agent/application/action_store_test.go new file mode 100644 index 00000000000..32660692f14 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/action_store_test.go @@ -0,0 +1,104 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/storage" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" +) + +func TestActionStore(t *testing.T) { + log, _ := logger.New() + withFile := func(fn func(t *testing.T, file string)) func(*testing.T) { + return func(t *testing.T) { + dir, err := ioutil.TempDir("", "action-store") + require.NoError(t, err) + defer os.RemoveAll(dir) + file := filepath.Join(dir, "config.yml") + fn(t, file) + } + } + + t.Run("action returns empty when no action is saved on disk", + withFile(func(t *testing.T, file string) { + s := storage.NewDiskStore(file) + store, err := newActionStore(log, s) + require.NoError(t, err) + require.Equal(t, 0, len(store.Actions())) + })) + + t.Run("will discard silently unknown action", + withFile(func(t *testing.T, file string) { + actionPolicyChange := &fleetapi.ActionUnknown{ + ActionID: "abc123", + } + + s := storage.NewDiskStore(file) + store, err := newActionStore(log, s) + require.NoError(t, err) + + require.Equal(t, 0, len(store.Actions())) + store.Add(actionPolicyChange) + err = store.Save() + require.NoError(t, err) + require.Equal(t, 0, len(store.Actions())) + })) + + t.Run("can save to disk known action type", + withFile(func(t *testing.T, file string) { + actionPolicyChange := &fleetapi.ActionPolicyChange{ + ActionID: "abc123", + ActionType: "POLICY_CHANGE", + Policy: map[string]interface{}{ + "hello": "world", + }, + } + + s := storage.NewDiskStore(file) + store, err := newActionStore(log, s) + require.NoError(t, err) + + require.Equal(t, 0, len(store.Actions())) + store.Add(actionPolicyChange) + err = store.Save() + require.NoError(t, err) + require.Equal(t, 1, len(store.Actions())) + + s = storage.NewDiskStore(file) + store1, err := newActionStore(log, s) + require.NoError(t, err) + + actions := store1.Actions() + require.Equal(t, 1, len(actions)) + + require.Equal(t, actionPolicyChange, actions[0]) + })) + + t.Run("when we ACK we save to disk", + withFile(func(t *testing.T, file string) { + actionPolicyChange := 
&fleetapi.ActionPolicyChange{ + ActionID: "abc123", + } + + s := storage.NewDiskStore(file) + store, err := newActionStore(log, s) + require.NoError(t, err) + + acker := newActionStoreAcker(&testAcker{}, store) + require.Equal(t, 0, len(store.Actions())) + + require.NoError(t, acker.Ack(context.Background(), actionPolicyChange)) + require.Equal(t, 1, len(store.Actions())) + })) +} diff --git a/x-pack/agent/pkg/agent/application/application.go b/x-pack/agent/pkg/agent/application/application.go new file mode 100644 index 00000000000..9cc2a83c088 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/application.go @@ -0,0 +1,71 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" +) + +// Application is the application interface implemented by the different running mode. +type Application interface { + Start() error + Stop() error + AgentInfo() *info.AgentInfo +} + +// New creates a new Agent and bootstrap the required subsystem. +func New(log *logger.Logger, pathConfigFile string) (Application, error) { + // Load configuration from disk to understand in which mode of operation + // we must start the agent, the mode of operation cannot be changed without restarting the + // agent. + config, err := config.LoadYAML(pathConfigFile) + if err != nil { + return nil, err + } + + if err := InjectAgentConfig(config); err != nil { + return nil, err + } + + return createApplication(log, pathConfigFile, config) +} + +func createApplication( + log *logger.Logger, + pathConfigFile string, + config *config.Config, +) (Application, error) { + + log.Info("Detecting execution mode") + c := localDefaultConfig() + err := config.Unpack(c) + if err != nil { + return nil, errors.New(err, "initiating application") + } + + mgmt := defaultManagementConfig() + err = c.Management.Unpack(mgmt) + if err != nil { + return nil, errors.New(err, "initiating application") + } + + ctx := context.Background() + + switch mgmt.Mode { + case localMode: + log.Info("Agent is managed locally") + return newLocal(ctx, log, pathConfigFile, config) + case fleetMode: + log.Info("Agent is managed by Fleet") + return newManaged(ctx, log, config) + default: + return nil, ErrInvalidMgmtMode + } +} diff --git a/x-pack/agent/pkg/agent/application/application_test.go b/x-pack/agent/pkg/agent/application/application_test.go new file mode 100644 index 00000000000..7c1975fef64 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/application_test.go @@ -0,0 +1,5 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application diff --git a/x-pack/agent/pkg/agent/application/config.go b/x-pack/agent/pkg/agent/application/config.go new file mode 100644 index 00000000000..9339350b737 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/config.go @@ -0,0 +1,160 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application/info"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/kibana"
+	fleetreporter "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/fleet"
+	logreporter "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/log"
+)
+
+// TODO(ph) correctly set up the global path.
+func fleetAgentConfigPath() string {
+	return info.AgentConfigFile
+}
+
+// TODO(ph) correctly set up with the global path.
+func fleetActionStoreFile() string {
+	return info.AgentActionStoreFile
+}
+
+// Config defines the configuration of the Agent.
+type Config struct {
+	Management *config.Config `config:"management"`
+}
+
+func localDefaultConfig() *Config {
+	return &Config{}
+}
+
+type managementMode int
+
+// Define the supported modes of management.
+const (
+	localMode managementMode = iota + 1
+	fleetMode
+)
+
+var managementModeMap = map[string]managementMode{
+	"local": localMode,
+	"fleet": fleetMode,
+}
+
+func (m *managementMode) Unpack(v string) error {
+	mgt, ok := managementModeMap[v]
+	if !ok {
+		return fmt.Errorf(
+			"unknown management mode, received '%s'; valid values are 'local' or 'fleet'",
+			v,
+		)
+	}
+	*m = mgt
+	return nil
+}
+
+// ManagementConfig defines the options for running the Beats.
+type ManagementConfig struct {
+	Mode      managementMode      `config:"mode"`
+	Reporting *logreporter.Config `config:"reporting.log"`
+}
+
+func defaultManagementConfig() *ManagementConfig {
+	return &ManagementConfig{
+		Mode: localMode,
+	}
+}
+
+type localConfig struct {
+	Management *localManagementConfig `config:"management" yaml:"management"`
+}
+
+type localManagementConfig struct {
+	Reload    *reloadConfig       `config:"reload" yaml:"reload"`
+	Path      string              `config:"path" yaml:"path"`
+	Reporting *logreporter.Config `config:"reporting" yaml:"reporting"`
+}
+
+type reloadConfig struct {
+	Enabled bool          `config:"enabled" yaml:"enabled"`
+	Period  time.Duration `config:"period" yaml:"period"`
+}
+
+func (r *reloadConfig) Validate() error {
+	if r.Enabled {
+		if r.Period <= 0 {
+			return ErrInvalidPeriod
+		}
+	}
+	return nil
+}
+
+func localConfigDefault() *localConfig {
+	return &localConfig{
+		Management: &localManagementConfig{
+			Reload: &reloadConfig{
+				Enabled: true,
+				Period:  10 * time.Second,
+			},
+			Reporting: logreporter.DefaultLogConfig(),
+		},
+	}
+}
+
+// FleetAgentConfig is the internal configuration of the agent after enrollment is done; this
+// configuration is not exposed in any way in the agent.yml and is internal only.
+type FleetAgentConfig struct {
+	API       *APIAccess    `config:"api" yaml:"api"`
+	Reporting *LogReporting `config:"reporting" yaml:"reporting"`
+}
+
+// APIAccess contains the required details to connect to the Kibana endpoint.
+type APIAccess struct {
+	AccessAPIKey string         `config:"access_api_key" yaml:"access_api_key"`
+	Kibana       *kibana.Config `config:"kibana" yaml:"kibana"`
+}
+
+// LogReporting defines the Fleet options for log reporting.
+type LogReporting struct {
+	Log   *logreporter.Config             `config:"log" yaml:"log"`
+	Fleet *fleetreporter.ManagementConfig `config:"fleet" yaml:"fleet"`
+}
+
+// Validate validates the required fields for accessing the API.
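+//
+// The enrollment file validated here is expected to look roughly like this
+// (illustrative; field names come from the struct tags above):
+//
+//	api:
+//	  access_api_key: "my-access-api-key"
+//	  kibana:
+//	    host: "localhost:5601"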
+func (e *APIAccess) Validate() error { + if len(e.AccessAPIKey) == 0 { + return errors.New("empty access token", errors.TypeConfig) + } + + if e.Kibana == nil || len(e.Kibana.Host) == 0 { + return errors.New("missing Kibana host configuration", errors.TypeConfig) + } + + return nil +} + +func defaultFleetAgentConfig() *FleetAgentConfig { + return &FleetAgentConfig{ + Reporting: &LogReporting{ + Log: logreporter.DefaultLogConfig(), + Fleet: fleetreporter.DefaultFleetManagementConfig(), + }, + } +} + +func createFleetConfigFromEnroll(access *APIAccess) (*FleetAgentConfig, error) { + if err := access.Validate(); err != nil { + return nil, errors.New(err, "invalid enrollment options", errors.TypeConfig) + } + + cfg := defaultFleetAgentConfig() + cfg.API = access + return cfg, nil +} diff --git a/x-pack/agent/pkg/agent/application/config_request.go b/x-pack/agent/pkg/agent/application/config_request.go new file mode 100644 index 00000000000..52e8cd3b4cd --- /dev/null +++ b/x-pack/agent/pkg/agent/application/config_request.go @@ -0,0 +1,52 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "strings" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" +) + +const shortID = 8 + +type configRequest struct { + id string + createdAt time.Time + programs []program.Program +} + +func (c *configRequest) String() string { + names := c.ProgramNames() + return "[" + c.ShortID() + "] Config: " + strings.Join(names, ", ") +} + +func (c *configRequest) ID() string { + return c.id +} + +func (c *configRequest) ShortID() string { + if len(c.id) <= shortID { + return c.id + } + return c.id[0:shortID] +} + +func (c *configRequest) CreatedAt() time.Time { + return c.createdAt +} + +func (c *configRequest) Programs() []program.Program { + return c.programs +} + +func (c *configRequest) ProgramNames() []string { + names := make([]string, 0, len(c.programs)) + for _, name := range c.programs { + names = append(names, name.Spec.Name) + } + return names +} diff --git a/x-pack/agent/pkg/agent/application/config_request_test.go b/x-pack/agent/pkg/agent/application/config_request_test.go new file mode 100644 index 00000000000..f0140aa8ffd --- /dev/null +++ b/x-pack/agent/pkg/agent/application/config_request_test.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConfigRequest(t *testing.T) { + t.Run("limit case for ShortID()", func(t *testing.T) { + c := configRequest{id: "bye"} + require.Equal(t, "bye", c.ShortID()) + + // TODO(PH): add validation when we create the config request. + c = configRequest{id: ""} + require.Equal(t, "", c.ShortID()) + }) + + t.Run("ShortID()", func(t *testing.T) { + c := configRequest{id: "HELLOWORLDBYEBYE"} + require.Equal(t, "HELLOWOR", c.ShortID()) + }) +} diff --git a/x-pack/agent/pkg/agent/application/config_test.go b/x-pack/agent/pkg/agent/application/config_test.go new file mode 100644 index 00000000000..c1cd3e29459 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/config_test.go @@ -0,0 +1,74 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" +) + +func TestConfig(t *testing.T) { + testMgmtMode(t) + testLocalConfig(t) +} + +func testMgmtMode(t *testing.T) { + t.Run("succeed when local mode is selected", func(t *testing.T) { + c := mustWithConfigMode("local") + m := ManagementConfig{} + err := c.Unpack(&m) + require.NoError(t, err) + assert.Equal(t, localMode, m.Mode) + + }) + + t.Run("succeed when fleet mode is selected", func(t *testing.T) { + c := mustWithConfigMode("fleet") + m := ManagementConfig{} + err := c.Unpack(&m) + require.NoError(t, err) + assert.Equal(t, fleetMode, m.Mode) + }) + + t.Run("fails on unknown mode", func(t *testing.T) { + c := mustWithConfigMode("what") + m := ManagementConfig{} + err := c.Unpack(&m) + require.Error(t, err) + }) +} + +func testLocalConfig(t *testing.T) { + t.Run("only accept positive period", func(t *testing.T) { + c := config.MustNewConfigFrom(map[string]interface{}{ + "enabled": true, + "period": 0, + }) + + m := reloadConfig{} + err := c.Unpack(&m) + require.Error(t, err) + + c = config.MustNewConfigFrom(map[string]interface{}{ + "enabled": true, + "period": 1, + }) + + err = c.Unpack(&m) + require.NoError(t, err) + }) +} + +func mustWithConfigMode(m string) *config.Config { + return config.MustNewConfigFrom( + map[string]interface{}{ + "mode": m, + }, + ) +} diff --git a/x-pack/agent/pkg/agent/application/configuration_embed.go b/x-pack/agent/pkg/agent/application/configuration_embed.go new file mode 100644 index 00000000000..b87532e34ff --- /dev/null +++ b/x-pack/agent/pkg/agent/application/configuration_embed.go @@ -0,0 +1,25 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by dev-tools/cmd/buildfleetcfg/buildfleetcfg.go - DO NOT EDIT. + +package application + +import "github.com/elastic/beats/v7/x-pack/agent/pkg/packer" + +// DefaultAgentFleetConfig is the content of the default configuration when we enroll a beat, the agent.yml +// will be replaced with this variables. 
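+//
+// These bytes are consumed by NewEnrollCmd (enroll_cmd.go), which passes them to
+// storage.NewReplaceOnSuccessStore as the replacement content for the agent configuration.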
+var DefaultAgentFleetConfig []byte + +func init() { + // Packed File + // _meta/agent.fleet.yml + unpacked := packer.MustUnpack("eJyMld2OozgWx+/3MfoBZvkoWs1KcxGnCmM60Buo2Ng3K2ynDImdoEmAwGrffWVIpapnZldzEUUCcz7+53f+/veXf5n9tfp7pfan6y9ver+//jIa/eUfX9DkhP98/Ws/9CI1N3ikZFB/9ZvHb3s+oJdE0zKfENQTglnPDWuZj0dGtkqS4MhKpHJz06zcXlCse1mAAyVPikFsKhJoBN1e+FslPOxIqDvmp9/RCIww4RXF+ZkVYFORpObmReHdsUPRRwxZgiMtc23PVWWqmNEXVgCHj+DAvcBURLrC7JSEtUZxpmWct9zIyZ6n5dbmqe17bkIHxZkrYtCLU67ZGjT7AkAOsZbrp++cRB0lUnOCO/l8/o7W4Mq9XP9oVg1bemvWqj1yX3YchjWbz6wOKM56WSYHVoCeNcCp4E7RMnEqwmrq59NmDUZWRm5VJlqMQHMYTRLqA4K3tvJ2Svj5KEnmyDLRCEYdW4OOErflRiju0aUvmLTcRGNF8LRuVgqNQG+MUBtfttzkvfTT6/5ZeZtT24rVWT1qwrgoXs8KmdqRMZh+NN/6n2oz+ELLzKlI2lHvW89g6HNz6ynJp423zHwzonufoJZQKQm/3WvKW2HwQcJw3BfzLBxKdFeR4IRg3ksvuHAvOlpGBIwOlRedWJlaXR1aJidW5m8M6pGRzOF+ElidN17dc7LrGbnVws9bOoYfesdpc+/dqYiruY8dBFkvGvBxxrI1goGS7Dc7S3teQn1lJHTl81mlE3jXz1Tkpj/3RAswSJJcKpKqysMBivFka2cmHFHMtDDa7Mkc/1M+fEQwGiXUhpLMEcNZoXWqKkIX3iBrOdwpy62IEy083Mk1qNny71fkdkFxbvWzPM96oRi41NxaOoLfWHlU7JT0vPjgZmOyHsVS01PWSrjrbD8CymHeCzvrJjxzz9Ub7zHbT3oGA/NX7zq2/JQ5lNwupb/owCCeqJ+01ODpvpcX7mW1hLrnzcz345vfzbUXcdIzP6l5mXboRTuLV0QX4X1Tssw0Wy3aLHommpJ8ZvzxXQEmAfGhIqylXuRUJOzE+GR5+X2dX9Ea9RUMr6wIP3TxdCf8vOZwsHs6CBMeWJlNP9SSl89sujVfg4H7iYNg4HKSaNEAw32khI3tBdYXHt/eeb1WJHgTMBxnhl5XQ/q8WnYCuvUe6qskjprfr0EnidtYP1z6zAZaym7WKk566uFJeMsOcxI9lb71ou1X9JzamMP/4PsgDK7lwnbHSvFTrE0D/AriDsVg5F6mhZ/OvL37HZ2fZUaS28Xu6RLLcpsE75762JH7jKyO4rRV3AdamGUWD+8/ZS2Di/fTAhhK9MV618zhouHbH3ZOtSMr83H/evdMiDtKkpkhBrHDysRqbP3R9uJUEM+MyDgJ5rzWs/2speamNw14YYTVktzujLGax5ne2Dl/xP1q90qS3d275lgtK7M36oUuP21trZp62lhP/cSCZXyu1T7/Q67ntLvPqJj7sb7nz7vcs2Z1Q9DVlAT2nnRQLFsOB0XNTgmDHWG0Zavn812lr6wANTvlc077jJmo5TEeWTEz/6ih9LJeksD50azeveto93n/aV+YvReh3fejqsiT4mWqqMGO9LDm/7+fYfYsmNs9sh48pXH6wXYU/mkuy25FXJcXj1pa3oCrGEHDytxnxPL4rrn1g1wzE7k83s5sptPqnvdTjtezyoiz5H7BTwKGHSOBU5HoYuuTEI92h21+6dXa3kHW+9/zixE4H2xHE4I/x9jcfQB72qnWc58PzSgJjug5fcxJTJbb1QU902GzXt3Sw+Pe+FP27DtW1gP3As1PeUvJ8PXxTv3665f//O2/AQAA//+lslh9") + raw, ok := unpacked["_meta/agent.fleet.yml"] + if !ok { + // ensure we have something loaded. + panic("agent.fleet.yml is not included in the binary") + } + DefaultAgentFleetConfig = raw +} diff --git a/x-pack/agent/pkg/agent/application/emitter.go b/x-pack/agent/pkg/agent/application/emitter.go new file mode 100644 index 00000000000..4ccb4aba946 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/emitter.go @@ -0,0 +1,64 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
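+
+// emitter.go turns a single raw configuration into per-program configurations: the raw config
+// is transpiled into an AST, mapped onto the known programs, passed through the registered
+// decorators, and finally handed to the router (see emitter below).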
+
+package application
+
+import (
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+type decoratorFunc = func(string, *transpiler.AST, []program.Program) ([]program.Program, error)
+
+func emitter(log *logger.Logger, router *router, decorators ...decoratorFunc) emitterFunc {
+	return func(c *config.Config) error {
+		if err := InjectAgentConfig(c); err != nil {
+			return err
+		}
+
+		log.Debug("Transforming configuration into a tree")
+		m, err := c.ToMapStr()
+		if err != nil {
+			return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig)
+		}
+
+		ast, err := transpiler.NewAST(m)
+		if err != nil {
+			return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig)
+		}
+
+		log.Debugf("Supported programs: %s", strings.Join(program.KnownProgramNames(), ", "))
+		log.Debug("Converting single configuration into specific programs configuration")
+
+		programsToRun, err := program.Programs(ast)
+		if err != nil {
+			return err
+		}
+
+		for _, decorator := range decorators {
+			for outputType, ptr := range programsToRun {
+				programsToRun[outputType], err = decorator(outputType, ast, ptr)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		return router.Dispatch(ast.HashStr(), programsToRun)
+	}
+}
+
+func readfiles(files []string, emitter emitterFunc) error {
+	c, err := config.LoadFiles(files...)
+	if err != nil {
+		return errors.New(err, "could not load or merge configuration", errors.TypeConfig)
+	}
+
+	return emitter(c)
+}
diff --git a/x-pack/agent/pkg/agent/application/enroll_cmd.go b/x-pack/agent/pkg/agent/application/enroll_cmd.go
new file mode 100644
index 00000000000..480563dfdaf
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/enroll_cmd.go
@@ -0,0 +1,191 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/elastic/beats/v7/libbeat/common/transport/tlscommon"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application/info"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/storage"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/kibana"
+)
+
+type store interface {
+	Save(io.Reader) error
+}
+
+type storeLoad interface {
+	store
+	Load() (io.ReadCloser, error)
+}
+
+type clienter interface {
+	Send(
+		ctx context.Context,
+		method string,
+		path string,
+		params url.Values,
+		headers http.Header,
+		body io.Reader,
+	) (*http.Response, error)
+
+	URI() string
+}
+
+// EnrollCmd is the enroll subcommand; it handles the enrollment interaction between the Agent
+// and the Kibana API.
+type EnrollCmd struct {
+	log          *logger.Logger
+	options      *EnrollCmdOption
+	client       clienter
+	configStore  store
+	kibanaConfig *kibana.Config
+}
+
+// EnrollCmdOption defines all the supported enrollment options.
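+// For example (illustrative values, mirroring the tests in this package):
+//
+//	opts := &EnrollCmdOption{
+//		URL:                  "https://localhost:5601",
+//		EnrollAPIKey:         "my-enrollment-api-key",
+//		UserProvidedMetadata: map[string]interface{}{"custom": "customize"},
+//	}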
+type EnrollCmdOption struct {
+	ID                   string
+	URL                  string
+	CAs                  []string
+	CASha256             []string
+	UserProvidedMetadata map[string]interface{}
+	EnrollAPIKey         string
+}
+
+func (e *EnrollCmdOption) kibanaConfig() (*kibana.Config, error) {
+	cfg, err := kibana.NewConfigFromURL(e.URL)
+	if err != nil {
+		return nil, err
+	}
+
+	// Add any SSL options from the CLI.
+	if len(e.CAs) > 0 || len(e.CASha256) > 0 {
+		cfg.TLS = &tlscommon.Config{
+			CAs:      e.CAs,
+			CASha256: e.CASha256,
+		}
+	}
+
+	return cfg, nil
+}
+
+// NewEnrollCmd creates a new enroll command that registers the current Agent with the remote
+// system.
+func NewEnrollCmd(
+	log *logger.Logger,
+	options *EnrollCmdOption,
+	configPath string,
+) (*EnrollCmd, error) {
+
+	store := storage.NewReplaceOnSuccessStore(
+		configPath,
+		DefaultAgentFleetConfig,
+		storage.NewEncryptedDiskStore(fleetAgentConfigPath(), []byte("")),
+	)
+
+	return NewEnrollCmdWithStore(
+		log,
+		options,
+		configPath,
+		store,
+	)
+}
+
+// NewEnrollCmdWithStore creates a new enroll command and accepts a custom store.
+func NewEnrollCmdWithStore(
+	log *logger.Logger,
+	options *EnrollCmdOption,
+	configPath string,
+	store store,
+) (*EnrollCmd, error) {
+
+	cfg, err := options.kibanaConfig()
+	if err != nil {
+		return nil, errors.New(err,
+			"invalid Kibana configuration",
+			errors.TypeConfig,
+			errors.M(errors.MetaKeyURI, options.URL))
+	}
+
+	client, err := fleetapi.NewWithConfig(log, cfg)
+	if err != nil {
+		return nil, errors.New(err,
+			"fail to create the API client",
+			errors.TypeNetwork,
+			errors.M(errors.MetaKeyURI, options.URL))
+	}
+
+	return &EnrollCmd{
+		log:          log,
+		client:       client,
+		options:      options,
+		kibanaConfig: cfg,
+		configStore:  store,
+	}, nil
+}
+
+// Execute tries to enroll the agent into Fleet.
+func (c *EnrollCmd) Execute() error {
+	cmd := fleetapi.NewEnrollCmd(c.client)
+
+	metadata, err := metadata()
+	if err != nil {
+		return errors.New(err, "acquiring hostname")
+	}
+
+	r := &fleetapi.EnrollRequest{
+		EnrollAPIKey: c.options.EnrollAPIKey,
+		SharedID:     c.options.ID,
+		Type:         fleetapi.PermanentEnroll,
+		Metadata: fleetapi.Metadata{
+			Local:        metadata,
+			UserProvided: c.options.UserProvidedMetadata,
+		},
+	}
+
+	resp, err := cmd.Execute(context.Background(), r)
+	if err != nil {
+		return errors.New(err,
+			"fail to execute request to Kibana",
+			errors.TypeNetwork)
+	}
+
+	fleetConfig, err := createFleetConfigFromEnroll(&APIAccess{
+		AccessAPIKey: resp.Item.AccessAPIKey,
+		Kibana:       c.kibanaConfig,
+	})
+	if err != nil {
+		return err
+	}
+
+	reader, err := yamlToReader(fleetConfig)
+	if err != nil {
+		return err
+	}
+
+	if err := c.configStore.Save(reader); err != nil {
+		return errors.New(err, "could not save enrollment information", errors.TypeFilesystem)
+	}
+
+	if _, err := info.NewAgentInfo(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func yamlToReader(in interface{}) (io.Reader, error) {
+	data, err := yaml.Marshal(in)
+	if err != nil {
+		return nil, errors.New(err, "could not marshal to YAML")
+	}
+	return bytes.NewReader(data), nil
+}
diff --git a/x-pack/agent/pkg/agent/application/enroll_cmd_test.go b/x-pack/agent/pkg/agent/application/enroll_cmd_test.go
new file mode 100644
index 00000000000..519de8b99e8
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/enroll_cmd_test.go
@@ -0,0 +1,400 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package application + +import ( + "bytes" + "crypto/tls" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "runtime" + "strconv" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/authority" +) + +type mockStore struct { + Err error + Called bool + Content []byte +} + +func (m *mockStore) Save(in io.Reader) error { + m.Called = true + if m.Err != nil { + return m.Err + } + + buf := new(bytes.Buffer) + io.Copy(buf, in) + m.Content = buf.Bytes() + return nil +} + +func TestEnroll(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Disabled under windows: https://github.com/elastic/beats/issues/16860") + } + + log, _ := logger.New() + + t.Run("fail to save is propagated", withTLSServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(` +{ + "action": "created", + "success": true, + "item": { + "id": "a9328860-ec54-11e9-93c4-d72ab8a69391", + "active": true, + "policy_id": "69f3f5a0-ec52-11e9-93c4-d72ab8a69391", + "type": "PERMANENT", + "enrolled_at": "2019-10-11T18:26:37.158Z", + "user_provided_metadata": { + "custom": "customize" + }, + "local_metadata": { + "platform": "linux", + "version": "8.0.0" + }, + "actions": [], + "access_api_key": "my-access-token" + } +}`)) + }) + return mux + }, func(t *testing.T, caBytes []byte, host string) { + caFile, err := bytesToTMPFile(caBytes) + require.NoError(t, err) + defer os.Remove(caFile) + + url := "https://" + host + store := &mockStore{Err: errors.New("fail to save")} + cmd, err := NewEnrollCmdWithStore( + log, + &EnrollCmdOption{ + ID: "my-id", + URL: url, + CAs: []string{caFile}, + EnrollAPIKey: "my-enrollment-token", + UserProvidedMetadata: map[string]interface{}{"custom": "customize"}, + }, + "", + store, + ) + require.NoError(t, err) + + err = cmd.Execute() + require.Error(t, err) + }, + )) + + t.Run("successfully enroll with TLS and save access api key in the store", withTLSServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(` +{ + "action": "created", + "success": true, + "item": { + "id": "a9328860-ec54-11e9-93c4-d72ab8a69391", + "active": true, + "policy_id": "69f3f5a0-ec52-11e9-93c4-d72ab8a69391", + "type": "PERMANENT", + "enrolled_at": "2019-10-11T18:26:37.158Z", + "user_provided_metadata": { + "custom": "customize" + }, + "local_metadata": { + "platform": "linux", + "version": "8.0.0" + }, + "actions": [], + "access_api_key": "my-access-api-key" + } +}`)) + }) + return mux + }, func(t *testing.T, caBytes []byte, host string) { + caFile, err := bytesToTMPFile(caBytes) + require.NoError(t, err) + defer os.Remove(caFile) + + url := "https://" + host + store := &mockStore{} + cmd, err := NewEnrollCmdWithStore( + log, + &EnrollCmdOption{ + ID: "my-id", + URL: url, + CAs: []string{caFile}, + EnrollAPIKey: "my-enrollment-api-key", + UserProvidedMetadata: map[string]interface{}{"custom": "customize"}, + }, + "", + store, + ) + require.NoError(t, err) + + err = cmd.Execute() + require.NoError(t, err) + + config, err := readConfig(store.Content) + + require.NoError(t, err) + 
require.Equal(t, "my-access-api-key", config.API.AccessAPIKey) + require.Equal(t, host, config.API.Kibana.Host) + require.Equal(t, "", config.API.Kibana.Username) + require.Equal(t, "", config.API.Kibana.Password) + }, + )) + + t.Run("successfully enroll when a slash is defined at the end of host", withServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(` +{ + "action": "created", + "success": true, + "item": { + "id": "a9328860-ec54-11e9-93c4-d72ab8a69391", + "active": true, + "policy_id": "69f3f5a0-ec52-11e9-93c4-d72ab8a69391", + "type": "PERMANENT", + "enrolled_at": "2019-10-11T18:26:37.158Z", + "user_provided_metadata": { + "custom": "customize" + }, + "local_metadata": { + "platform": "linux", + "version": "8.0.0" + }, + "actions": [], + "access_api_key": "my-access-api-key" + } +}`)) + }) + return mux + }, func(t *testing.T, host string) { + url := "http://" + host + "/" + store := &mockStore{} + cmd, err := NewEnrollCmdWithStore( + log, + &EnrollCmdOption{ + ID: "my-id", + URL: url, + CAs: []string{}, + EnrollAPIKey: "my-enrollment-api-key", + UserProvidedMetadata: map[string]interface{}{"custom": "customize"}, + }, + "", + store, + ) + require.NoError(t, err) + + err = cmd.Execute() + require.NoError(t, err) + + require.True(t, store.Called) + + config, err := readConfig(store.Content) + + require.NoError(t, err) + require.Equal(t, "my-access-api-key", config.API.AccessAPIKey) + require.Equal(t, host, config.API.Kibana.Host) + require.Equal(t, "", config.API.Kibana.Username) + require.Equal(t, "", config.API.Kibana.Password) + }, + )) + + t.Run("successfully enroll without TLS and save access api key in the store", withServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(` +{ + "action": "created", + "success": true, + "item": { + "id": "a9328860-ec54-11e9-93c4-d72ab8a69391", + "active": true, + "policy_id": "69f3f5a0-ec52-11e9-93c4-d72ab8a69391", + "type": "PERMANENT", + "enrolled_at": "2019-10-11T18:26:37.158Z", + "user_provided_metadata": { + "custom": "customize" + }, + "local_metadata": { + "platform": "linux", + "version": "8.0.0" + }, + "actions": [], + "access_api_key": "my-access-api-key" + } +}`)) + }) + return mux + }, func(t *testing.T, host string) { + url := "http://" + host + store := &mockStore{} + cmd, err := NewEnrollCmdWithStore( + log, + &EnrollCmdOption{ + ID: "my-id", + URL: url, + CAs: []string{}, + EnrollAPIKey: "my-enrollment-api-key", + UserProvidedMetadata: map[string]interface{}{"custom": "customize"}, + }, + "", + store, + ) + require.NoError(t, err) + + err = cmd.Execute() + require.NoError(t, err) + + require.True(t, store.Called) + + config, err := readConfig(store.Content) + + require.NoError(t, err) + require.Equal(t, "my-access-api-key", config.API.AccessAPIKey) + require.Equal(t, host, config.API.Kibana.Host) + require.Equal(t, "", config.API.Kibana.Username) + require.Equal(t, "", config.API.Kibana.Password) + }, + )) + + t.Run("fail to enroll without TLS", withServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + 
w.Write([]byte(` +{ + "statusCode": 500, + "error": "Internal Server Error" +}`)) + }) + return mux + }, func(t *testing.T, host string) { + url := "http://" + host + store := &mockStore{} + cmd, err := NewEnrollCmdWithStore( + log, + &EnrollCmdOption{ + ID: "my-id", + URL: url, + CAs: []string{}, + EnrollAPIKey: "my-enrollment-token", + UserProvidedMetadata: map[string]interface{}{"custom": "customize"}, + }, + "", + store, + ) + require.NoError(t, err) + + err = cmd.Execute() + require.Error(t, err) + require.False(t, store.Called) + }, + )) +} + +func withServer( + m func(t *testing.T) *http.ServeMux, + test func(t *testing.T, host string), +) func(t *testing.T) { + return func(t *testing.T) { + listener, err := net.Listen("tcp", ":0") + require.NoError(t, err) + defer listener.Close() + + port := listener.Addr().(*net.TCPAddr).Port + + go http.Serve(listener, m(t)) + + test(t, "localhost:"+strconv.Itoa(port)) + } +} + +func withTLSServer( + m func(t *testing.T) *http.ServeMux, + test func(t *testing.T, caBytes []byte, host string), +) func(t *testing.T) { + return func(t *testing.T) { + + ca, err := authority.NewCA() + require.NoError(t, err) + pair, err := ca.GeneratePair() + require.NoError(t, err) + + serverCert, err := tls.X509KeyPair(pair.Crt, pair.Key) + require.NoError(t, err) + + listener, err := net.Listen("tcp", ":0") + require.NoError(t, err) + defer listener.Close() + + port := listener.Addr().(*net.TCPAddr).Port + + s := http.Server{ + Handler: m(t), + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + }, + } + + // Uses the X509KeyPair pair defined in the TLSConfig struct instead of file on disk. + go s.ServeTLS(listener, "", "") + + test(t, ca.Crt(), "localhost:"+strconv.Itoa(port)) + } +} + +func bytesToTMPFile(b []byte) (string, error) { + f, err := ioutil.TempFile("", "prefix") + if err != nil { + return "", err + } + f.Write(b) + if err := f.Close(); err != nil { + return "", err + } + + return f.Name(), nil +} + +func readConfig(raw []byte) (*FleetAgentConfig, error) { + r := bytes.NewReader(raw) + config, err := config.NewConfigFrom(r) + if err != nil { + return nil, err + } + + cfg := defaultFleetAgentConfig() + if err := config.Unpack(cfg); err != nil { + return nil, err + } + return cfg, nil +} diff --git a/x-pack/agent/pkg/agent/application/error.go b/x-pack/agent/pkg/agent/application/error.go new file mode 100644 index 00000000000..6905711cf94 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/error.go @@ -0,0 +1,15 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import "github.com/pkg/errors" + +var ( + // ErrInvalidPeriod is returned when a reload period interval is not valid + ErrInvalidPeriod = errors.New("period must be higher than zero") + + // ErrInvalidMgmtMode is returned when an unknown mode is selected. + ErrInvalidMgmtMode = errors.New("invalid management mode") +) diff --git a/x-pack/agent/pkg/agent/application/fleet_acker.go b/x-pack/agent/pkg/agent/application/fleet_acker.go new file mode 100644 index 00000000000..4f101377596 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/fleet_acker.go @@ -0,0 +1,94 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + "fmt" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" + "github.com/elastic/beats/v7/x-pack/agent/pkg/scheduler" +) + +type actionAcker struct { + log *logger.Logger + dispatcher dispatcher + client clienter + scheduler scheduler.Scheduler + agentInfo agentInfo + reporter fleetReporter + done chan struct{} +} + +func newActionAcker( + log *logger.Logger, + agentInfo agentInfo, + client clienter, +) (*actionAcker, error) { + return &actionAcker{ + log: log, + client: client, + agentInfo: agentInfo, + }, nil +} + +func (f *actionAcker) Ack(ctx context.Context, action fleetapi.Action) error { + // checkin + cmd := fleetapi.NewAckCmd(f.agentInfo, f.client) + req := &fleetapi.AckRequest{ + AgentID: f.agentInfo.AgentID(), + Actions: []string{ + action.ID(), + }, + } + + _, err := cmd.Execute(ctx, req) + if err != nil { + return errors.New(err, fmt.Sprintf("acknowledge action '%s' failed", action.ID()), errors.TypeNetwork) + } + + return nil +} + +func (f *actionAcker) AckBatch(ctx context.Context, actions []fleetapi.Action) error { + // checkin + ids := make([]string, 0, len(actions)) + for _, action := range actions { + ids = append(ids, action.ID()) + } + + cmd := fleetapi.NewAckCmd(f.agentInfo, f.client) + req := &fleetapi.AckRequest{ + Actions: ids, + } + + _, err := cmd.Execute(ctx, req) + if err != nil { + return errors.New(err, fmt.Sprintf("acknowledge %d actions '%v' failed", len(actions), actions), errors.TypeNetwork) + } + + return nil +} + +func (f *actionAcker) Commit(ctx context.Context) error { + return nil +} + +type noopAcker struct{} + +func newNoopAcker() *noopAcker { + return &noopAcker{} +} + +func (f *noopAcker) Ack(ctx context.Context, action fleetapi.Action) error { + return nil +} + +func (*noopAcker) Commit(ctx context.Context) error { return nil } + +var _ fleetAcker = &actionAcker{} +var _ fleetAcker = &noopAcker{} diff --git a/x-pack/agent/pkg/agent/application/fleet_acker_test.go b/x-pack/agent/pkg/agent/application/fleet_acker_test.go new file mode 100644 index 00000000000..c440ffab493 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/fleet_acker_test.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package application + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" +) + +func TestAcker(t *testing.T) { + type ackRequest struct { + Actions []string `json:"action_ids"` + } + + log, _ := logger.New() + client := newTestingClient() + agentInfo := &testAgentInfo{} + acker, err := newActionAcker(log, agentInfo, client) + if err != nil { + t.Fatal(err) + } + + if acker == nil { + t.Fatal("acker not initialized") + } + + testID := "ack-test-action-id" + testAction := &fleetapi.ActionUnknown{ActionID: testID} + + ch := client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + content, err := ioutil.ReadAll(body) + assert.NoError(t, err) + cr := &ackRequest{} + err = json.Unmarshal(content, &cr) + assert.NoError(t, err) + + assert.EqualValues(t, 1, len(cr.Actions)) + assert.EqualValues(t, testID, cr.Actions[0]) + + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }) + + go func() { + for range ch { + } + }() + + if err := acker.Ack(context.Background(), testAction); err != nil { + t.Fatal(err) + } + if err := acker.Commit(context.Background()); err != nil { + t.Fatal(err) + } +} diff --git a/x-pack/agent/pkg/agent/application/fleet_gateway.go b/x-pack/agent/pkg/agent/application/fleet_gateway.go new file mode 100644 index 00000000000..dd6b559972f --- /dev/null +++ b/x-pack/agent/pkg/agent/application/fleet_gateway.go @@ -0,0 +1,217 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + "sync" + "time" + + "github.com/elastic/beats/v7/libbeat/common/backoff" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" + "github.com/elastic/beats/v7/x-pack/agent/pkg/scheduler" +) + +type dispatcher interface { + Dispatch(acker fleetAcker, actions ...action) error +} + +type agentInfo interface { + AgentID() string +} + +type fleetReporter interface { + Events() ([]fleetapi.SerializableEvent, func()) +} + +type fleetAcker interface { + Ack(ctx context.Context, action fleetapi.Action) error + Commit(ctx context.Context) error +} + +// fleetGateway is a gateway between the Agent and the Fleet API, it's take cares of all the +// bidirectional communication requirements. The gateway aggregates events and will periodically +// call the API to send the events and will receive actions to be executed locally. +// The only supported action for now is a "ActionPolicyChange". 
+type fleetGateway struct { + bgContext context.Context + log *logger.Logger + dispatcher dispatcher + client clienter + scheduler scheduler.Scheduler + backoff backoff.Backoff + settings *fleetGatewaySettings + agentInfo agentInfo + reporter fleetReporter + done chan struct{} + wg sync.WaitGroup + acker fleetAcker +} + +type fleetGatewaySettings struct { + Duration time.Duration + Jitter time.Duration + Backoff backoffSettings +} + +type backoffSettings struct { + Init time.Duration + Max time.Duration +} + +func newFleetGateway( + ctx context.Context, + log *logger.Logger, + settings *fleetGatewaySettings, + agentInfo agentInfo, + client clienter, + d dispatcher, + r fleetReporter, + acker fleetAcker, +) (*fleetGateway, error) { + scheduler := scheduler.NewPeriodicJitter(settings.Duration, settings.Jitter) + return newFleetGatewayWithScheduler( + ctx, + log, + settings, + agentInfo, + client, + d, + scheduler, + r, + acker, + ) +} + +func newFleetGatewayWithScheduler( + ctx context.Context, + log *logger.Logger, + settings *fleetGatewaySettings, + agentInfo agentInfo, + client clienter, + d dispatcher, + scheduler scheduler.Scheduler, + r fleetReporter, + acker fleetAcker, +) (*fleetGateway, error) { + done := make(chan struct{}) + + return &fleetGateway{ + bgContext: ctx, + log: log, + dispatcher: d, + client: client, + settings: settings, + agentInfo: agentInfo, + scheduler: scheduler, + backoff: backoff.NewEqualJitterBackoff( + done, + settings.Backoff.Init, + settings.Backoff.Max, + ), + done: done, + reporter: r, + acker: acker, + }, nil +} + +func (f *fleetGateway) worker() { + for { + select { + case <-f.scheduler.WaitTick(): + f.log.Debug("FleetGateway calling Checkin API") + + // Execute the checkin call and for any errors returned by the fleet API + // the function will retry to communicate with fleet with an exponential delay and some + // jitter to help better distribute the load from a fleet of agents. 
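+			// doExecute (below) resets the backoff and keeps retrying until the call
+			// succeeds or the gateway is stopped, so an error here means the retry
+			// loop itself was interrupted.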
+			resp, err := f.doExecute()
+			if err != nil {
+				f.log.Error(err)
+				continue
+			}
+
+			actions := make([]action, len(resp.Actions))
+			for idx, a := range resp.Actions {
+				actions[idx] = a
+			}
+
+			if err := f.dispatcher.Dispatch(f.acker, actions...); err != nil {
+				f.log.Errorf("failed to dispatch actions, error: %s", err)
+			}
+
+			f.log.Debugf("FleetGateway is sleeping, next update in %s", f.settings.Duration)
+		case <-f.done:
+			return
+		case <-f.bgContext.Done():
+			f.Stop()
+			return
+		}
+	}
+}
+
+func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) {
+	f.backoff.Reset()
+	for {
+		// TODO: wrap with timeout context
+		resp, err := f.execute(f.bgContext)
+		if err != nil {
+			f.log.Errorf("Could not communicate with the Checkin API, will retry, error: %s", err)
+			if !f.backoff.Wait() {
+				return nil, errors.New(
+					"execute retry loop was stopped",
+					errors.TypeNetwork,
+					errors.M(errors.MetaKeyURI, f.client.URI()),
+				)
+			}
+			continue
+		}
+		return resp, nil
+	}
+}
+
+func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) {
+	// Get the pending events from the reporter.
+	ee, ack := f.reporter.Events()
+
+	var metaData map[string]interface{}
+	if m, err := metadata(); err == nil {
+		metaData = m
+	}
+
+	// checkin
+	cmd := fleetapi.NewCheckinCmd(f.agentInfo, f.client)
+	req := &fleetapi.CheckinRequest{
+		Events:   ee,
+		Metadata: metaData,
+	}
+
+	resp, err := cmd.Execute(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ack the events so they are dropped from the queue.
+	ack()
+	return resp, nil
+}
+
+func (f *fleetGateway) Start() {
+	f.wg.Add(1)
+	go func(wg *sync.WaitGroup) {
+		defer f.log.Info("Fleet gateway is stopped")
+		defer wg.Done()
+
+		f.worker()
+	}(&f.wg)
+}
+
+func (f *fleetGateway) Stop() {
+	f.log.Info("Fleet gateway is stopping")
+	defer f.scheduler.Stop()
+	close(f.done)
+	f.wg.Wait()
+}
diff --git a/x-pack/agent/pkg/agent/application/fleet_gateway_test.go b/x-pack/agent/pkg/agent/application/fleet_gateway_test.go
new file mode 100644
index 00000000000..68a3dc8d4c2
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/fleet_gateway_test.go
@@ -0,0 +1,458 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
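+
+// The tests below drive the gateway with fake client and dispatcher implementations; their
+// Answer callbacks return canned HTTP responses, and the returned channels let each test
+// synchronize with the gateway worker goroutine (see ackSeq).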
+ +package application + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + repo "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter" + fleetreporter "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/fleet" + "github.com/elastic/beats/v7/x-pack/agent/pkg/scheduler" +) + +type clientCallbackFunc func(headers http.Header, body io.Reader) (*http.Response, error) + +type testingClient struct { + sync.Mutex + callback clientCallbackFunc + received chan struct{} +} + +func (t *testingClient) Send( + _ context.Context, + method string, + path string, + params url.Values, + headers http.Header, + body io.Reader, +) (*http.Response, error) { + t.Lock() + defer t.Unlock() + defer func() { t.received <- struct{}{} }() + return t.callback(headers, body) +} + +func (t *testingClient) URI() string { + return "http://localhost" +} + +func (t *testingClient) Answer(fn clientCallbackFunc) <-chan struct{} { + t.Lock() + defer t.Unlock() + t.callback = fn + return t.received +} + +func newTestingClient() *testingClient { + return &testingClient{received: make(chan struct{})} +} + +type testingDispatcherFunc func(...action) error + +type testingDispatcher struct { + sync.Mutex + callback testingDispatcherFunc + received chan struct{} +} + +func (t *testingDispatcher) Dispatch(acker fleetAcker, actions ...action) error { + t.Lock() + defer t.Unlock() + defer func() { t.received <- struct{}{} }() + return t.callback(actions...) +} + +func (t *testingDispatcher) Answer(fn testingDispatcherFunc) <-chan struct{} { + t.Lock() + defer t.Unlock() + t.callback = fn + return t.received +} + +func newTestingDispatcher() *testingDispatcher { + return &testingDispatcher{received: make(chan struct{})} +} + +type withGatewayFunc func(*testing.T, *fleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper, repo.Backend) + +func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { + return func(t *testing.T) { + scheduler := scheduler.NewStepper() + client := newTestingClient() + dispatcher := newTestingDispatcher() + + log, _ := logger.New() + rep := getReporter(agentInfo, log, t) + + gateway, err := newFleetGatewayWithScheduler( + context.Background(), + log, + settings, + agentInfo, + client, + dispatcher, + scheduler, + rep, + newNoopAcker(), + ) + + go gateway.Start() + defer gateway.Stop() + + require.NoError(t, err) + + fn(t, gateway, client, dispatcher, scheduler, rep) + } +} + +func ackSeq(channels ...<-chan struct{}) <-chan struct{} { + comm := make(chan struct{}) + go func(comm chan struct{}) { + for _, c := range channels { + <-c + } + comm <- struct{}{} + }(comm) + return comm +} + +func wrapStrToResp(code int, body string) *http.Response { + return &http.Response{ + Status: fmt.Sprintf("%d %s", code, http.StatusText(code)), + StatusCode: code, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(bytes.NewBufferString(body)), + ContentLength: int64(len(body)), + Header: make(http.Header, 0), + } +} + +func TestFleetGateway(t *testing.T) { + t.Skip("Concurrency issue see https://github.com/elastic/beats/issues/16771 for a stacktrace") + + agentInfo := &testAgentInfo{} + settings := &fleetGatewaySettings{ + Duration: 5 * time.Second, + Backoff: backoffSettings{Init: 1 * time.Second, Max: 5 * time.Second}, + 
} + + t.Run("send no event and receive no action", withGateway(agentInfo, settings, func( + t *testing.T, + gateway *fleetGateway, + client *testingClient, + dispatcher *testingDispatcher, + scheduler *scheduler.Stepper, + rep repo.Backend, + ) { + received := ackSeq( + client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }), + dispatcher.Answer(func(actions ...action) error { + require.Equal(t, 0, len(actions)) + return nil + }), + ) + + // Synchronize scheduler and acking of calls from the worker go routine. + scheduler.Next() + <-received + })) + + t.Run("Successfully connects and receives a series of actions", withGateway(agentInfo, settings, func( + t *testing.T, + gateway *fleetGateway, + client *testingClient, + dispatcher *testingDispatcher, + scheduler *scheduler.Stepper, + rep repo.Backend, + ) { + received := ackSeq( + client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + // TODO: assert no events + resp := wrapStrToResp(http.StatusOK, ` +{ + "actions": [ + { + "type": "POLICY_CHANGE", + "id": "id1", + "data": { + "policy": { + "id": "policy-id" + } + } + }, + { + "type": "ANOTHER_ACTION", + "id": "id2" + } + ], + "success": true +} +`) + return resp, nil + }), + dispatcher.Answer(func(actions ...action) error { + require.Equal(t, 2, len(actions)) + return nil + }), + ) + + scheduler.Next() + <-received + })) + + // Test the normal time based execution. + t.Run("Periodically communicates with Fleet", func(t *testing.T) { + scheduler := scheduler.NewPeriodic(1 * time.Second) + client := newTestingClient() + dispatcher := newTestingDispatcher() + + log, _ := logger.New() + gateway, err := newFleetGatewayWithScheduler( + context.Background(), + log, + settings, + agentInfo, + client, + dispatcher, + scheduler, + getReporter(agentInfo, log, t), + newNoopAcker(), + ) + + go gateway.Start() + defer gateway.Stop() + + require.NoError(t, err) + + var count int + for { + received := ackSeq( + client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }), + dispatcher.Answer(func(actions ...action) error { + require.Equal(t, 0, len(actions)) + return nil + }), + ) + + <-received + count++ + if count == 5 { + return + } + } + }) + + t.Run("send event and receive no action", withGateway(agentInfo, settings, func( + t *testing.T, + gateway *fleetGateway, + client *testingClient, + dispatcher *testingDispatcher, + scheduler *scheduler.Stepper, + rep repo.Backend, + ) { + rep.Report(context.Background(), &testStateEvent{}) + received := ackSeq( + client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + cr := &request{} + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(content, &cr) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, 1, len(cr.Events)) + + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }), + dispatcher.Answer(func(actions ...action) error { + require.Equal(t, 0, len(actions)) + return nil + }), + ) + + // Synchronize scheduler and acking of calls from the worker go routine. 
+ scheduler.Next() + <-received + })) + + t.Run("Test the wait loop is interruptible", func(t *testing.T) { + d := 10 * time.Minute + scheduler := scheduler.NewPeriodic(d) + client := newTestingClient() + dispatcher := newTestingDispatcher() + + log, _ := logger.New() + gateway, err := newFleetGatewayWithScheduler( + context.Background(), + log, + &fleetGatewaySettings{ + Duration: d, + Backoff: backoffSettings{Init: 1 * time.Second, Max: 30 * time.Second}, + }, + agentInfo, + client, + dispatcher, + scheduler, + getReporter(agentInfo, log, t), + newNoopAcker(), + ) + + go gateway.Start() + defer gateway.Stop() + + require.NoError(t, err) + + // Silently dispatch action. + go func() { <-dispatcher.Answer(func(actions ...action) error { return nil }) }() + + // Make sure that all API calls to the checkin API are successfull, the following will happen: + // 1. Gateway -> checking api. + // 2. WaitTick() will block for 10 minutes. + // 3. Stop will unblock the Wait. + <-client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }) + }) +} + +func TestRetriesOnFailures(t *testing.T) { + agentInfo := &testAgentInfo{} + settings := &fleetGatewaySettings{ + Duration: 5 * time.Second, + Backoff: backoffSettings{Init: 1 * time.Second, Max: 5 * time.Second}, + } + + t.Run("When the gateway fails to communicate with the checkin API we will retry", + withGateway(agentInfo, settings, func( + t *testing.T, + gateway *fleetGateway, + client *testingClient, + dispatcher *testingDispatcher, + scheduler *scheduler.Stepper, + rep repo.Backend, + ) { + rep.Report(context.Background(), &testStateEvent{}) + + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { + return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil + } + + // Initial tick is done out of bound so we can block on channels. + go scheduler.Next() + + // Simulate a 500 errors for the next 3 calls. + <-client.Answer(fail) + <-client.Answer(fail) + <-client.Answer(fail) + + // API recover + received := ackSeq( + client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + cr := &request{} + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(content, &cr) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, 1, len(cr.Events)) + + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }), + + dispatcher.Answer(func(actions ...action) error { + require.Equal(t, 0, len(actions)) + return nil + }), + ) + + <-received + })) + + t.Run("The retry loop is interruptible", + withGateway(agentInfo, &fleetGatewaySettings{ + Duration: 0 * time.Second, + Backoff: backoffSettings{Init: 10 * time.Minute, Max: 20 * time.Minute}, + }, func( + t *testing.T, + gateway *fleetGateway, + client *testingClient, + dispatcher *testingDispatcher, + scheduler *scheduler.Stepper, + rep repo.Backend, + ) { + rep.Report(context.Background(), &testStateEvent{}) + + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { + return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil + } + + // Initial tick is done out of bound so we can block on channels. + go scheduler.Next() + + // Fail to enter retry loop, all other calls will fails and will force to wait on big initial + // delay. 
+			<-client.Answer(fail)
+
+			// Non-obvious: withGateway stops the gateway when this function returns, which
+			// exits the retry loop. The init value of the backoff is set to exceed the test default timeout.
+		}))
+}
+
+func getReporter(info agentInfo, log *logger.Logger, t *testing.T) *fleetreporter.Reporter {
+	fleetR, err := fleetreporter.NewReporter(info, log, fleetreporter.DefaultFleetManagementConfig())
+	if err != nil {
+		t.Fatal(errors.Wrap(err, "fail to create reporters"))
+	}
+
+	return fleetR
+}
+
+type testAgentInfo struct{}
+
+func (testAgentInfo) AgentID() string { return "agent-secret" }
+
+type testStateEvent struct{}
+
+func (testStateEvent) Type() string                    { return repo.EventTypeState }
+func (testStateEvent) SubType() string                 { return repo.EventSubTypeInProgress }
+func (testStateEvent) Time() time.Time                 { return time.Unix(0, 1) }
+func (testStateEvent) Message() string                 { return "hello" }
+func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} }
+
+type request struct {
+	Events []interface{} `json:"events"`
+}
diff --git a/x-pack/agent/pkg/agent/application/global_config.go b/x-pack/agent/pkg/agent/application/global_config.go
new file mode 100644
index 00000000000..8dd956d71c3
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/global_config.go
@@ -0,0 +1,60 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+)
+
+var (
+	homePath string
+	dataPath string
+)
+
+func init() {
+	homePath = retrieveExecutablePath()
+	dataPath = retrieveDataPath()
+}
+
+// InjectAgentConfig injects the agent's global config into the provided configuration.
+func InjectAgentConfig(c *config.Config) error {
+	globalConfig := AgentGlobalConfig()
+	if err := c.Merge(globalConfig); err != nil {
+		return errors.New("failed to inject agent global config", err, errors.TypeConfig)
+	}
+
+	return nil
+}
+
+// AgentGlobalConfig gets the global config used for resolution of variables inside the
+// configuration, such as ${path.data}.
+func AgentGlobalConfig() map[string]interface{} {
+	return map[string]interface{}{
+		"path": map[string]interface{}{
+			"data": dataPath,
+			"home": homePath,
+		},
+	}
+}
+
+// retrieveExecutablePath returns the directory where the binary lives.
+// Executable is not supported on nacl.
+func retrieveExecutablePath() string {
+	execPath, err := os.Executable()
+	if err != nil {
+		panic(err)
+	}
+
+	return filepath.Dir(execPath)
+}
+
+// retrieveDataPath returns the data directory located alongside the binary.
+func retrieveDataPath() string {
+	return filepath.Join(retrieveExecutablePath(), "data")
+}
diff --git a/x-pack/agent/pkg/agent/application/handler_action_policy_change.go b/x-pack/agent/pkg/agent/application/handler_action_policy_change.go
new file mode 100644
index 00000000000..2c50a3b4d08
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/handler_action_policy_change.go
@@ -0,0 +1,40 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
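+
+// Note: handlerPolicyChange below reacts to POLICY_CHANGE actions coming back
+// from Fleet: it parses the action's embedded policy into a configuration,
+// hands it to the emitter, and only acks the action once the emit succeeded.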
+ +package application + +import ( + "context" + "fmt" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" +) + +type handlerPolicyChange struct { + log *logger.Logger + emitter emitterFunc +} + +func (h *handlerPolicyChange) Handle(ctx context.Context, a action, acker fleetAcker) error { + h.log.Debugf("HandlerPolicyChange: action '%+v' received", a) + action, ok := a.(*fleetapi.ActionPolicyChange) + if !ok { + return fmt.Errorf("invalid type, expected ActionPolicyChange and received %T", a) + } + + c, err := config.NewConfigFrom(action.Policy) + if err != nil { + return errors.New(err, "could not parse the configuration from the policy", errors.TypeConfig) + } + + h.log.Debugf("HandlerPolicyChange: emit configuration for action %+v", a) + if err := h.emitter(c); err != nil { + return err + } + + return acker.Ack(ctx, action) +} diff --git a/x-pack/agent/pkg/agent/application/handler_action_policy_change_test.go b/x-pack/agent/pkg/agent/application/handler_action_policy_change_test.go new file mode 100644 index 00000000000..e18601d69f1 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/handler_action_policy_change_test.go @@ -0,0 +1,151 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + "sync" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" +) + +type mockEmitter struct { + err error + policy *config.Config +} + +func (m *mockEmitter) Emitter(policy *config.Config) error { + m.policy = policy + return m.err +} + +func TestPolicyChange(t *testing.T) { + log, _ := logger.New() + ack := newNoopAcker() + + t.Run("Receive a policy change and successfully emits a raw configuration", func(t *testing.T) { + emitter := &mockEmitter{} + + policy := map[string]interface{}{"hello": "world"} + action := &fleetapi.ActionPolicyChange{ + ActionID: "abc123", + ActionType: "POLICY_CHANGE", + Policy: policy, + } + + handler := &handlerPolicyChange{log: log, emitter: emitter.Emitter} + + err := handler.Handle(context.Background(), action, ack) + require.NoError(t, err) + require.Equal(t, config.MustNewConfigFrom(policy), emitter.policy) + }) + + t.Run("Receive a policy and fail to emits a raw configuration", func(t *testing.T) { + mockErr := errors.New("error returned") + emitter := &mockEmitter{err: mockErr} + + policy := map[string]interface{}{"hello": "world"} + action := &fleetapi.ActionPolicyChange{ + ActionID: "abc123", + ActionType: "POLICY_CHANGE", + Policy: policy, + } + + handler := &handlerPolicyChange{log: log, emitter: emitter.Emitter} + + err := handler.Handle(context.Background(), action, ack) + require.Error(t, err) + }) +} + +func TestPolicyAcked(t *testing.T) { + log, _ := logger.New() + t.Run("Policy change should not ACK on error", func(t *testing.T) { + tacker := &testAcker{} + + mockErr := errors.New("error returned") + emitter := &mockEmitter{err: mockErr} + + policy := map[string]interface{}{"hello": "world"} + actionID 
:= "abc123" + action := &fleetapi.ActionPolicyChange{ + ActionID: actionID, + ActionType: "POLICY_CHANGE", + Policy: policy, + } + + handler := &handlerPolicyChange{log: log, emitter: emitter.Emitter} + + err := handler.Handle(context.Background(), action, tacker) + require.Error(t, err) + + actions := tacker.Items() + assert.EqualValues(t, 0, len(actions)) + }) + + t.Run("Policy change should ACK", func(t *testing.T) { + tacker := &testAcker{} + + emitter := &mockEmitter{} + + policy := map[string]interface{}{"hello": "world"} + actionID := "abc123" + action := &fleetapi.ActionPolicyChange{ + ActionID: actionID, + ActionType: "POLICY_CHANGE", + Policy: policy, + } + + handler := &handlerPolicyChange{log: log, emitter: emitter.Emitter} + + err := handler.Handle(context.Background(), action, tacker) + require.NoError(t, err) + + actions := tacker.Items() + assert.EqualValues(t, 1, len(actions)) + assert.Equal(t, actionID, actions[0]) + }) +} + +type testAcker struct { + acked []string + ackedLock sync.Mutex +} + +func (t *testAcker) Ack(_ context.Context, action fleetapi.Action) error { + t.ackedLock.Lock() + defer t.ackedLock.Unlock() + + if t.acked == nil { + t.acked = make([]string, 0) + } + + t.acked = append(t.acked, action.ID()) + return nil +} + +func (t *testAcker) Commit(_ context.Context) error { + return nil +} + +func (t *testAcker) Clear() { + t.ackedLock.Lock() + defer t.ackedLock.Unlock() + + t.acked = make([]string, 0) +} + +func (t *testAcker) Items() []string { + t.ackedLock.Lock() + defer t.ackedLock.Unlock() + return t.acked +} diff --git a/x-pack/agent/pkg/agent/application/handler_default.go b/x-pack/agent/pkg/agent/application/handler_default.go new file mode 100644 index 00000000000..885944520c6 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/handler_default.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" +) + +type handlerDefault struct { + log *logger.Logger +} + +func (h *handlerDefault) Handle(_ context.Context, a action, acker fleetAcker) error { + h.log.Errorf("HandlerDefault: action '%+v' received", a) + return nil +} diff --git a/x-pack/agent/pkg/agent/application/handler_unknown.go b/x-pack/agent/pkg/agent/application/handler_unknown.go new file mode 100644 index 00000000000..dccbf8bd282 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/handler_unknown.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" +) + +type handlerUnknown struct { + log *logger.Logger +} + +func (h *handlerUnknown) Handle(_ context.Context, a action, acker fleetAcker) error { + h.log.Errorf("HandlerUnknown: action '%+v' received", a) + return nil +} diff --git a/x-pack/agent/pkg/agent/application/info/agent_id.go b/x-pack/agent/pkg/agent/application/info/agent_id.go new file mode 100644 index 00000000000..17422808717 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/info/agent_id.go @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package info + +import ( + "bytes" + "fmt" + "io" + + "github.com/gofrs/uuid" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/storage" + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" +) + +// AgentConfigFile is a name of file used to store agent information +const AgentConfigFile = "fleet.yml" +const agentInfoKey = "agent_info" + +// AgentActionStoreFile is the file that will contains the action that can be replayed after restart. +const AgentActionStoreFile = "action_store.yml" + +type persistentAgentInfo struct { + ID string `json:"ID" yaml:"ID" config:"ID"` +} + +type ioStore interface { + Save(io.Reader) error + Load() (io.ReadCloser, error) +} + +func generateAgentID() (string, error) { + uid, err := uuid.NewV4() + if err != nil { + return "", fmt.Errorf("error while generating UUID for agent: %v", err) + } + + return uid.String(), nil +} + +func loadAgentInfo(forceUpdate bool) (*persistentAgentInfo, error) { + s := storage.NewEncryptedDiskStore(AgentConfigFile, []byte("")) + + agentinfo, err := getInfoFromStore(s) + if err != nil { + return nil, err + } + + if agentinfo != nil && !forceUpdate && agentinfo.ID != "" { + return agentinfo, nil + } + + agentinfo.ID, err = generateAgentID() + if err != nil { + return nil, err + } + + if err := updateAgentInfo(s, agentinfo); err != nil { + return nil, errors.New(err, "storing generated agent id", errors.TypeFilesystem) + } + + return agentinfo, nil +} + +func getInfoFromStore(s ioStore) (*persistentAgentInfo, error) { + reader, err := s.Load() + if err != nil { + return nil, err + } + + cfg, err := config.NewConfigFrom(reader) + if err != nil { + return nil, errors.New(err, + fmt.Sprintf("fail to read configuration %s for the agent", AgentConfigFile), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, AgentConfigFile)) + } + + configMap, err := cfg.ToMapStr() + if err != nil { + return nil, errors.New(err, + "failed to unpack stored config to map", + errors.TypeFilesystem) + } + + agentInfoSubMap, found := configMap[agentInfoKey] + if !found { + return &persistentAgentInfo{}, nil + } + + cc, err := config.NewConfigFrom(agentInfoSubMap) + if err != nil { + return nil, errors.New(err, "failed to create config from agent info submap") + } + + pid := &persistentAgentInfo{} + if err := cc.Unpack(&pid); err != nil { + return nil, errors.New(err, "failed to unpack stored config to map") + } + + return pid, nil +} + +func updateAgentInfo(s ioStore, agentInfo *persistentAgentInfo) error { + reader, err := s.Load() + if err != nil { + return err + } + + cfg, err := config.NewConfigFrom(reader) + if err != nil { + return errors.New(err, fmt.Sprintf("fail to read configuration %s for the agent", AgentConfigFile), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, AgentConfigFile)) + } + + configMap := make(map[string]interface{}) + if err := cfg.Unpack(&configMap); err != nil { + return errors.New(err, "failed to unpack stored config to map") + } + + configMap[agentInfoKey] = agentInfo + + r, err := yamlToReader(configMap) + if err != nil { + return err + } + + return s.Save(r) +} + +func yamlToReader(in interface{}) (io.Reader, error) { + data, err := yaml.Marshal(in) + if err != nil { + return nil, errors.New(err, "could not marshal to 
YAML") + } + return bytes.NewReader(data), nil +} diff --git a/x-pack/agent/pkg/agent/application/info/agent_info.go b/x-pack/agent/pkg/agent/application/info/agent_info.go new file mode 100644 index 00000000000..e990b83bd49 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/info/agent_info.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package info + +// AgentInfo is a collection of information about agent. +type AgentInfo struct { + agentID string +} + +// NewAgentInfo creates a new agent information. +// In case when agent ID was already created it returns, +// this created ID otherwise it generates +// new unique identifier for agent. +// If agent config file does not exist it gets created. +func NewAgentInfo() (*AgentInfo, error) { + agentInfo, err := loadAgentInfo(false) + if err != nil { + return nil, err + } + + return &AgentInfo{ + agentID: agentInfo.ID, + }, nil +} + +// ForceNewAgentInfo creates a new agent information. +// Generates new unique identifier for agent regardless +// of any existing ID. +// If agent config file does not exist it gets created. +func ForceNewAgentInfo() (*AgentInfo, error) { + agentInfo, err := loadAgentInfo(true) + if err != nil { + return nil, err + } + + return &AgentInfo{ + agentID: agentInfo.ID, + }, nil +} + +// AgentID returns an agent identifier. +func (i *AgentInfo) AgentID() string { + return i.agentID +} diff --git a/x-pack/agent/pkg/agent/application/lazy_acker.go b/x-pack/agent/pkg/agent/application/lazy_acker.go new file mode 100644 index 00000000000..ea32dc6b869 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/lazy_acker.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "context" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" +) + +type batchAcker interface { + AckBatch(ctx context.Context, actions []fleetapi.Action) error +} + +type ackForcer interface { + ForceAck() +} + +type lazyAcker struct { + acker batchAcker + queue []fleetapi.Action +} + +func newLazyAcker(baseAcker batchAcker) *lazyAcker { + return &lazyAcker{ + acker: baseAcker, + queue: make([]fleetapi.Action, 0), + } +} + +func (f *lazyAcker) Ack(ctx context.Context, action fleetapi.Action) error { + f.queue = append(f.queue, action) + + if _, isAckForced := action.(ackForcer); isAckForced { + return f.Commit(ctx) + } + + return nil +} + +func (f *lazyAcker) Commit(ctx context.Context) error { + err := f.acker.AckBatch(ctx, f.queue) + if err != nil { + // do not cleanup on error + return err + } + + f.queue = make([]fleetapi.Action, 0) + return nil +} + +var _ fleetAcker = &lazyAcker{} diff --git a/x-pack/agent/pkg/agent/application/lazy_acker_test.go b/x-pack/agent/pkg/agent/application/lazy_acker_test.go new file mode 100644 index 00000000000..257f828fe6f --- /dev/null +++ b/x-pack/agent/pkg/agent/application/lazy_acker_test.go @@ -0,0 +1,124 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package application + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" +) + +func TestLazyAcker(t *testing.T) { + type ackRequest struct { + Actions []string `json:"action_ids"` + } + + log, _ := logger.New() + client := newTestingClient() + agentInfo := &testAgentInfo{} + acker, err := newActionAcker(log, agentInfo, client) + if err != nil { + t.Fatal(err) + } + + lacker := newLazyAcker(acker) + + if acker == nil { + t.Fatal("acker not initialized") + } + + testID1 := "ack-test-action-id" + testID2 := testID1 + "2" + testID3 := testID1 + "3" + testAction1 := &fleetapi.ActionUnknown{ActionID: testID1} + testAction2 := &actionImmediate{ActionID: testID2} + testAction3 := &fleetapi.ActionUnknown{ActionID: testID3} + + ch := client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { + content, err := ioutil.ReadAll(body) + assert.NoError(t, err) + cr := &ackRequest{} + err = json.Unmarshal(content, &cr) + assert.NoError(t, err) + + if len(cr.Actions) == 0 { + t.Fatal("expected events but got none") + } + if cr.Actions[0] == testID1 { + assert.EqualValues(t, 2, len(cr.Actions)) + assert.EqualValues(t, testID1, cr.Actions[0]) + assert.EqualValues(t, testID2, cr.Actions[1]) + + } else { + assert.EqualValues(t, 1, len(cr.Actions)) + } + + resp := wrapStrToResp(http.StatusOK, `{ "actions": [], "success": true }`) + return resp, nil + }) + + go func() { + for range ch { + } + }() + c := context.Background() + + if err := lacker.Ack(c, testAction1); err != nil { + t.Fatal(err) + } + if err := lacker.Ack(c, testAction2); err != nil { + t.Fatal(err) + } + if err := lacker.Ack(c, testAction3); err != nil { + t.Fatal(err) + } + if err := lacker.Commit(c); err != nil { + t.Fatal(err) + } + +} + +type actionImmediate struct { + ActionID string + ActionType string + originalType string +} + +// Type returns the type of the Action. +func (a *actionImmediate) Type() string { + return "IMMEDIATE" +} + +func (a *actionImmediate) ID() string { + return a.ActionID +} + +func (a *actionImmediate) ForceAck() {} + +func (a *actionImmediate) String() string { + var s strings.Builder + s.WriteString("action_id: ") + s.WriteString(a.ID()) + s.WriteString(", type: ") + s.WriteString(a.Type()) + s.WriteString(" (original type: ") + s.WriteString(a.OriginalType()) + s.WriteString(")") + return s.String() +} + +// OriginalType returns the original type of the action as returned by the API. +func (a *actionImmediate) OriginalType() string { + return a.originalType +} diff --git a/x-pack/agent/pkg/agent/application/local_meta.go b/x-pack/agent/pkg/agent/application/local_meta.go new file mode 100644 index 00000000000..af3c6b4c977 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/local_meta.go @@ -0,0 +1,25 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package application
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/release"
+)
+
+func metadata() (map[string]interface{}, error) {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return nil, err
+	}
+
+	return map[string]interface{}{
+		"platform": runtime.GOOS,
+		"version":  release.Version(),
+		"host":     hostname,
+	}, nil
+}
diff --git a/x-pack/agent/pkg/agent/application/local_mode.go b/x-pack/agent/pkg/agent/application/local_mode.go
new file mode 100644
index 00000000000..8d1ac3a087d
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/local_mode.go
@@ -0,0 +1,146 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application/info"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/dir"
+	reporting "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter"
+	logreporter "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/log"
+)
+
+type emitterFunc func(*config.Config) error
+
+// ConfigHandler is capable of handling a config and performing actions on it.
+type ConfigHandler interface {
+	HandleConfig(configrequest.Request) error
+}
+
+type discoverFunc func() ([]string, error)
+
+// ErrNoConfiguration is returned when no configuration is found.
+var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig)
+
+// Local represents a standalone agent that reads its configuration directly from disk.
+// Some parts of the configuration can be reloaded.
+type Local struct {
+	bgContext   context.Context
+	cancelCtxFn context.CancelFunc
+	log         *logger.Logger
+	source      source
+	agentInfo   *info.AgentInfo
+}
+
+type source interface {
+	Start() error
+	Stop() error
+}
+
+// newLocal returns an agent managed by a local configuration.
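+// A minimal usage sketch (hypothetical caller; the real wiring lives in the
+// application constructor, and cfg is a previously loaded *config.Config):
+//
+//	local, err := newLocal(context.Background(), nil, "agent.yml", cfg)
+//	if err != nil {
+//		return err
+//	}
+//	if err := local.Start(); err != nil {
+//		return err
+//	}
+//	defer local.Stop()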
+func newLocal( + ctx context.Context, + log *logger.Logger, + pathConfigFile string, + config *config.Config, +) (*Local, error) { + var err error + if log == nil { + log, err = logger.New() + if err != nil { + return nil, err + } + } + agentInfo, err := info.NewAgentInfo() + if err != nil { + return nil, err + } + + c := localConfigDefault() + if err := config.Unpack(c); err != nil { + return nil, errors.New(err, "initialize local mode") + } + + logR := logreporter.NewReporter(log, c.Management.Reporting) + + localApplication := &Local{ + log: log, + agentInfo: agentInfo, + } + + localApplication.bgContext, localApplication.cancelCtxFn = context.WithCancel(ctx) + + reporter := reporting.NewReporter(localApplication.bgContext, log, localApplication.agentInfo, logR) + + router, err := newRouter(log, streamFactory(localApplication.bgContext, config, nil, reporter)) + if err != nil { + return nil, errors.New(err, "fail to initialize pipeline router") + } + + discover := discoverer(pathConfigFile, c.Management.Path) + emit := emitter(log, router, injectMonitoring) + + var cfgSource source + if !c.Management.Reload.Enabled { + log.Debug("Reloading of configuration is off") + cfgSource = newOnce(log, discover, emit) + } else { + log.Debugf("Reloading of configuration is on, frequency is set to %s", c.Management.Reload.Period) + cfgSource = newPeriodic(log, c.Management.Reload.Period, discover, emit) + } + + localApplication.source = cfgSource + + return localApplication, nil +} + +// Start starts a local agent. +func (l *Local) Start() error { + l.log.Info("Agent is starting") + defer l.log.Info("Agent is stopped") + + if err := l.source.Start(); err != nil { + return err + } + + return nil +} + +// Stop stops a local agent. +func (l *Local) Stop() error { + l.cancelCtxFn() + return l.source.Stop() +} + +// AgentInfo retrieves agent information. +func (l *Local) AgentInfo() *info.AgentInfo { + return l.agentInfo +} + +func discoverer(patterns ...string) discoverFunc { + var p []string + for _, newP := range patterns { + if len(newP) == 0 { + continue + } + + p = append(p, newP) + } + + if len(p) == 0 { + return func() ([]string, error) { + return []string{}, ErrNoConfiguration + } + } + + return func() ([]string, error) { + return dir.DiscoverFiles(p...) + } +} diff --git a/x-pack/agent/pkg/agent/application/managed_mode.go b/x-pack/agent/pkg/agent/application/managed_mode.go new file mode 100644 index 00000000000..8075aa9cd25 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/managed_mode.go @@ -0,0 +1,205 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package application + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/storage" + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" + reporting "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter" + fleetreporter "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/fleet" + logreporter "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/log" +) + +var gatewaySettings = &fleetGatewaySettings{ + Duration: 2 * time.Second, + Jitter: 1 * time.Second, + Backoff: backoffSettings{ + Init: 1 * time.Second, + Max: 10 * time.Second, + }, +} + +type apiClient interface { + Send( + method string, + path string, + params url.Values, + headers http.Header, + body io.Reader, + ) (*http.Response, error) +} + +// Managed application, when the application is run in managed mode, most of the configuration are +// coming from the Fleet App. +type Managed struct { + bgContext context.Context + cancelCtxFn context.CancelFunc + log *logger.Logger + Config FleetAgentConfig + api apiClient + agentInfo *info.AgentInfo + gateway *fleetGateway +} + +func newManaged( + ctx context.Context, + log *logger.Logger, + rawConfig *config.Config, +) (*Managed, error) { + + agentInfo, err := info.NewAgentInfo() + if err != nil { + return nil, err + } + + path := fleetAgentConfigPath() + + // TODO(ph): Define the encryption password. + store := storage.NewEncryptedDiskStore(path, []byte("")) + reader, err := store.Load() + if err != nil { + return nil, errors.New(err, "could not initialize config store", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, path)) + } + + config, err := config.NewConfigFrom(reader) + if err != nil { + return nil, errors.New(err, + fmt.Sprintf("fail to read configuration %s for the agent", path), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, path)) + } + + rawConfig.Merge(config) + + cfg := defaultFleetAgentConfig() + if err := config.Unpack(cfg); err != nil { + return nil, errors.New(err, + fmt.Sprintf("fail to unpack configuration from %s", path), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, path)) + } + + client, err := fleetapi.NewAuthWithConfig(log, cfg.API.AccessAPIKey, cfg.API.Kibana) + if err != nil { + return nil, errors.New(err, + "fail to create API client", + errors.TypeNetwork, + errors.M(errors.MetaKeyURI, cfg.API.Kibana.Host)) + } + + managedApplication := &Managed{ + log: log, + agentInfo: agentInfo, + } + + managedApplication.bgContext, managedApplication.cancelCtxFn = context.WithCancel(ctx) + + logR := logreporter.NewReporter(log, cfg.Reporting.Log) + fleetR, err := fleetreporter.NewReporter(agentInfo, log, cfg.Reporting.Fleet) + if err != nil { + return nil, errors.New(err, "fail to create reporters") + } + + combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR, fleetR) + + router, err := newRouter(log, streamFactory(managedApplication.bgContext, rawConfig, client, combinedReporter)) + if err != nil { + return nil, errors.New(err, "fail to initialize pipeline router") + } + + emit := emitter(log, router) + acker, err := newActionAcker(log, agentInfo, client) + if err != nil { + return nil, err + } + + batchedAcker := newLazyAcker(acker) + + // Create the action store that 
will persist the last good policy change on disk. + actionStore, err := newActionStore(log, storage.NewDiskStore(fleetActionStoreFile())) + if err != nil { + return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", fleetActionStoreFile())) + } + actionAcker := newActionStoreAcker(batchedAcker, actionStore) + + actionDispatcher, err := newActionDispatcher(managedApplication.bgContext, log, &handlerDefault{log: log}) + if err != nil { + return nil, err + } + + actionDispatcher.MustRegister( + &fleetapi.ActionPolicyChange{}, + &handlerPolicyChange{ + log: log, + emitter: emit, + }, + ) + + actionDispatcher.MustRegister( + &fleetapi.ActionUnknown{}, + &handlerUnknown{log: log}, + ) + + actions := actionStore.Actions() + if len(actions) > 0 { + // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a + // persisted action on disk we should be able to ask Fleet to get the latest configuration. + // But at the moment this is not possible because the policy change was acked. + if err := replayActions(log, actionDispatcher, actionAcker, actions...); err != nil { + log.Errorf("could not recover state, error %+v, skipping...", err) + } + } + + gateway, err := newFleetGateway( + managedApplication.bgContext, + log, + gatewaySettings, + agentInfo, + client, + actionDispatcher, + fleetR, + actionAcker, + ) + if err != nil { + return nil, err + } + + managedApplication.gateway = gateway + return managedApplication, nil +} + +// Start starts a managed agent. +func (m *Managed) Start() error { + m.log.Info("Agent is starting") + m.gateway.Start() + return nil +} + +// Stop stops a managed agent. +func (m *Managed) Stop() error { + defer m.log.Info("Agent is stopped") + m.gateway.Stop() + m.cancelCtxFn() + return nil +} + +// AgentInfo retrieves agent information. +func (m *Managed) AgentInfo() *info.AgentInfo { + return m.agentInfo +} diff --git a/x-pack/agent/pkg/agent/application/monitoring_decorator.go b/x-pack/agent/pkg/agent/application/monitoring_decorator.go new file mode 100644 index 00000000000..00789d51d16 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/monitoring_decorator.go @@ -0,0 +1,70 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
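+
+// injectMonitoring decorates the list of programs to run with an additional
+// FLEET_MONITORING program: when monitoring is enabled in the policy it copies
+// the monitoring output into the generated config, and it records the program
+// list in that config so the config is regenerated whenever the set of
+// programs changes.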
+ +package application + +import ( + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler" +) + +const ( + monitoringName = "FLEET_MONITORING" + programsKey = "programs" + monitoringKey = "monitoring" + monitoringOutputKey = "monitoring.elasticsearch" + enabledKey = "monitoring.enabled" + outputKey = "output" + outputsKey = "outputs" + typeKey = "type" +) + +func injectMonitoring(outputGroup string, rootAst *transpiler.AST, programsToRun []program.Program) ([]program.Program, error) { + var err error + monitoringProgram := program.Program{ + Spec: program.Spec{ + Name: monitoringName, + Cmd: monitoringName, + }, + } + + var config map[string]interface{} + + if _, found := transpiler.Lookup(rootAst, monitoringKey); !found { + config = make(map[string]interface{}) + config[enabledKey] = false + } else { + ast := rootAst.Clone() + if err := getMonitoringRule(outputGroup).Apply(ast); err != nil { + return programsToRun, err + } + + config, err = ast.Map() + if err != nil { + return programsToRun, err + } + + programList := make([]string, 0, len(programsToRun)) + for _, p := range programsToRun { + programList = append(programList, p.Spec.Cmd) + } + // making program list part of the config + // so it will get regenerated with every change + config[programsKey] = programList + } + + monitoringProgram.Config, err = transpiler.NewAST(config) + if err != nil { + return programsToRun, err + } + + return append(programsToRun, monitoringProgram), nil +} + +func getMonitoringRule(outputName string) *transpiler.RuleList { + return transpiler.NewRuleList( + transpiler.Copy(monitoringOutputKey, outputKey), + transpiler.Filter(monitoringKey, programsKey, outputKey), + ) +} diff --git a/x-pack/agent/pkg/agent/application/monitoring_decorator_test.go b/x-pack/agent/pkg/agent/application/monitoring_decorator_test.go new file mode 100644 index 00000000000..fcbd9b9aeaf --- /dev/null +++ b/x-pack/agent/pkg/agent/application/monitoring_decorator_test.go @@ -0,0 +1,183 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
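+
+// The test below runs the decorator over every program group derived from the
+// sample configuration and expects exactly one extra FLEET_MONITORING program,
+// wired to the monitoring elasticsearch output defined in inputConfigMap.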
+ +package application + +import ( + "testing" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler" +) + +func TestMonitoringInjection(t *testing.T) { + ast, err := transpiler.NewAST(inputConfigMap) + if err != nil { + t.Fatal(err) + } + + programsToRun, err := program.Programs(ast) + if err != nil { + t.Fatal(err) + } + +GROUPLOOP: + for group, ptr := range programsToRun { + programsCount := len(ptr) + newPtr, err := injectMonitoring(group, ast, ptr) + if err != nil { + t.Error(err) + continue GROUPLOOP + } + + if programsCount == len(newPtr) { + t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) + continue GROUPLOOP + } + + for _, p := range newPtr { + if p.Spec.Name != monitoringName { + continue + } + + cm, err := p.Config.Map() + if err != nil { + t.Error(err) + continue GROUPLOOP + } + + outputCfg, found := cm[outputKey] + if !found { + t.Errorf("output not found for '%s'", group) + continue GROUPLOOP + } + + outputMap, ok := outputCfg.(map[string]interface{}) + if !ok { + t.Errorf("output is not a map for '%s'", group) + continue GROUPLOOP + } + + esCfg, found := outputMap["elasticsearch"] + if !found { + t.Errorf("elasticsearch output not found for '%s'", group) + continue GROUPLOOP + } + + esMap, ok := esCfg.(map[string]interface{}) + if !ok { + t.Errorf("output.elasticsearch is not a map for '%s'", group) + continue GROUPLOOP + } + + if uname, found := esMap["username"]; !found { + t.Errorf("output.elasticsearch.username output not found for '%s'", group) + continue GROUPLOOP + } else if uname != "monitoring-uname" { + t.Errorf("output.elasticsearch.username has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) + continue GROUPLOOP + } + } + } +} + +var inputConfigMap = map[string]interface{}{ + "monitoring": map[string]interface{}{ + "enabled": true, + "logs": true, + "metrics": true, + "elasticsearch": map[string]interface{}{ + "index_name": "general", + "pass": "xxx", + "url": "xxxxx", + "username": "monitoring-uname", + }, + }, + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "index_name": "general", + "pass": "xxx", + "type": "elasticsearch", + "url": "xxxxx", + "username": "xxx", + }, + "infosec1": map[string]interface{}{ + "pass": "xxx", + "spool": map[string]interface{}{ + "file": "${path.data}/spool.dat", + }, + "type": "elasticsearch", + "url": "xxxxx", + "username": "xxx", + }, + }, + "streams": []interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/xxxx", + "processors": []interface{}{ + map[string]interface{}{ + "dissect": map[string]interface{}{ + "tokenizer": "---", + }, + }, + }, + "output": map[string]interface{}{ + "override": map[string]interface{}{ + "index_name": "my_service_logs", + "ingest_pipeline": "process_logs", + }, + }, + }, + map[string]interface{}{ + "type": "metric/system", + "username": "xxxx", + "pass": "yyy", + "output": map[string]interface{}{ + "index_name": "mysql_metrics", + "use_output": "infosec1", + }, + }, + }, +} + +// const inputConfig = `outputs: +// default: +// index_name: general +// pass: xxx +// type: es +// url: xxxxx +// username: xxx +// infosec1: +// pass: xxx +// spool: +// file: "${path.data}/spool.dat" +// type: es +// url: xxxxx +// username: xxx +// streams: +// - +// output: +// override: +// index_name: my_service_logs +// ingest_pipeline: process_logs +// path: /xxxx +// processors: +// - +// dissect: +// tokenizer: "---" 
+// type: log
+// -
+//   output:
+//     index_name: mysql_access_logs
+//   path: /xxxx
+//   type: log
+// -
+//   output:
+//     index_name: mysql_metrics
+//     use_output: infosec1
+//   pass: yyy
+//   type: metrics/system
+//   username: xxxx
+// `
diff --git a/x-pack/agent/pkg/agent/application/once.go b/x-pack/agent/pkg/agent/application/once.go
new file mode 100644
index 00000000000..f67e5d6611d
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/once.go
@@ -0,0 +1,37 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+type once struct {
+	log      *logger.Logger
+	discover discoverFunc
+	emitter  emitterFunc
+}
+
+func newOnce(log *logger.Logger, discover discoverFunc, emitter emitterFunc) *once {
+	return &once{log: log, discover: discover, emitter: emitter}
+}
+
+func (o *once) Start() error {
+	files, err := o.discover()
+	if err != nil {
+		return errors.New(err, "could not discover configuration files", errors.TypeConfig)
+	}
+
+	if len(files) == 0 {
+		return ErrNoConfiguration
+	}
+
+	return readfiles(files, o.emitter)
+}
+
+func (o *once) Stop() error {
+	return nil
+}
diff --git a/x-pack/agent/pkg/agent/application/periodic.go b/x-pack/agent/pkg/agent/application/periodic.go
new file mode 100644
index 00000000000..c16407dbea6
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/periodic.go
@@ -0,0 +1,124 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"strings"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/filewatcher"
+)
+
+type periodic struct {
+	log      *logger.Logger
+	period   time.Duration
+	done     chan struct{}
+	watcher  *filewatcher.Watch
+	emitter  emitterFunc
+	discover discoverFunc
+}
+
+func (p *periodic) Start() error {
+	if err := p.work(); err != nil {
+		p.log.Debugf("Failed to read configuration, error: %s", err)
+	}
+
+	for {
+		select {
+		case <-p.done:
+			return nil
+		case <-time.After(p.period):
+		}
+
+		if err := p.work(); err != nil {
+			p.log.Debugf("Failed to read configuration, error: %s", err)
+		}
+	}
+}
+
+func (p *periodic) work() error {
+	files, err := p.discover()
+	if err != nil {
+		return errors.New(err, "could not discover configuration files", errors.TypeConfig)
+	}
+
+	if len(files) == 0 {
+		return ErrNoConfiguration
+	}
+
+	// Reset the state of the watched files
+	p.watcher.Reset()
+
+	p.log.Debugf("Adding %d files to watch", len(files))
+	// Add any found files to the watchers
+	for _, f := range files {
+		p.watcher.Watch(f)
+	}
+
+	// Check for the following:
+	// - Watching of new files.
+	// - Files watched but some of them have changed.
+	// - Files that we were watching but are not watched anymore.
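+	// Update() reports these cases as s.Updated, s.Unwatched and s.Unchanged;
+	// s.NeedUpdate is set when the configuration needs to be re-read.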
+	s, err := p.watcher.Update()
+	if err != nil {
+		return errors.New(err, "could not update the configuration states", errors.TypeConfig)
+	}
+
+	if s.NeedUpdate {
+		p.log.Info("Configuration changes detected")
+		if len(s.Unwatched) > 0 {
+			p.log.Debugf("Unwatching %d files: %s", len(s.Unwatched), strings.Join(s.Unwatched, ", "))
+		}
+
+		if len(s.Updated) > 0 {
+			p.log.Debugf("Updated %d files: %s", len(s.Updated), strings.Join(s.Updated, ", "))
+		}
+
+		if len(s.Unchanged) > 0 {
+			p.log.Debugf("Unchanged %d files: %s", len(s.Unchanged), strings.Join(s.Unchanged, ", "))
+		}
+
+		err := readfiles(files, p.emitter)
+		if err != nil {
+			// assume something went really wrong and invalidate any cache
+			// so we get a full new config on next tick.
+			p.watcher.Invalidate()
+			return errors.New(err, "could not emit configuration")
+		}
+		return nil
+	}
+
+	p.log.Info("No configuration change")
+	return nil
+}
+
+func (p *periodic) Stop() error {
+	close(p.done)
+	return nil
+}
+
+func newPeriodic(
+	log *logger.Logger,
+	period time.Duration,
+	discover discoverFunc,
+	emitter emitterFunc,
+) *periodic {
+	w, err := filewatcher.New(log, filewatcher.DefaultComparer)
+
+	// this should not happen.
+	if err != nil {
+		panic(err)
+	}
+
+	return &periodic{
+		log:      log,
+		period:   period,
+		done:     make(chan struct{}),
+		watcher:  w,
+		discover: discover,
+		emitter:  emitter,
+	}
+}
diff --git a/x-pack/agent/pkg/agent/application/router.go b/x-pack/agent/pkg/agent/application/router.go
new file mode 100644
index 00000000000..3d4a46f36d2
--- /dev/null
+++ b/x-pack/agent/pkg/agent/application/router.go
@@ -0,0 +1,114 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package application
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/sorted"
+)
+
+// defautlRK is the default routing key until we implement the routing key / config matrix.
+var defautlRK = "DEFAULT"
+
+type routingKey = string
+
+type stream interface {
+	Execute(*configRequest) error
+	Close() error
+}
+
+type streamFunc func(*logger.Logger, routingKey) (stream, error)
+
+type router struct {
+	log           *logger.Logger
+	routes        *sorted.Set
+	streamFactory streamFunc
+}
+
+func newRouter(log *logger.Logger, factory streamFunc) (*router, error) {
+	var err error
+	if log == nil {
+		log, err = logger.New()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &router{log: log, streamFactory: factory, routes: sorted.NewSet()}, nil
+}
+
+func (r *router) Dispatch(id string, grpProg map[routingKey][]program.Program) error {
+	s := sorted.NewSet()
+
+	// Make sure that starting and updating is always done in the same order.
+	for rk, programs := range grpProg {
+		s.Add(rk, programs)
+	}
+
+	active := make(map[string]bool, len(grpProg))
+	for _, rk := range s.Keys() {
+		active[rk] = true
+
+		// Are we already running this stream?
+		// If it doesn't exist we create it; if it already exists we forward the configuration.
+ p, ok := r.routes.Get(rk) + var err error + if !ok { + r.log.Debugf("Creating stream: %s", rk) + p, err = r.streamFactory(r.log, rk) + if err != nil { + return err + } + r.routes.Add(rk, p) + } + + programs, ok := s.Get(rk) + if !ok { + return fmt.Errorf("could not find programs for routing key %s", rk) + } + + req := &configRequest{ + id: id, + programs: programs.([]program.Program), + } + + r.log.Debugf( + "Streams %s need to run config with ID %s and programs: %s", + rk, + req.ShortID(), + strings.Join(req.ProgramNames(), ", "), + ) + + err = p.(stream).Execute(req) + if err != nil { + return err + } + } + + // cleanup inactive streams. + // streams are shutdown down in alphabetical order. + keys := r.routes.Keys() + for _, k := range keys { + _, ok := active[k] + if ok { + continue + } + + p, ok := r.routes.Get(k) + if !ok { + continue + } + + r.log.Debugf("Removing routing key %s", k) + + p.(stream).Close() + r.routes.Remove(k) + } + + return nil +} diff --git a/x-pack/agent/pkg/agent/application/router_test.go b/x-pack/agent/pkg/agent/application/router_test.go new file mode 100644 index 00000000000..507629f053b --- /dev/null +++ b/x-pack/agent/pkg/agent/application/router_test.go @@ -0,0 +1,223 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package application + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" +) + +type rOp int + +const ( + createOp rOp = iota + 1 + executeOp + closeOp +) + +func (r *rOp) String() string { + m := map[rOp]string{ + 1: "create", + 2: "execute", + 3: "close", + } + v, ok := m[*r] + if !ok { + return "unknown operation" + } + return v +} + +type event struct { + rk routingKey + op rOp +} + +type notifyFunc func(routingKey, rOp, ...interface{}) + +func TestRouter(t *testing.T) { + programs := []program.Program{program.Program{Spec: program.Supported[1]}} + + t.Run("create new and destroy unused stream", func(t *testing.T) { + recorder := &recorder{} + r, err := newRouter(nil, recorder.factory) + require.NoError(t, err) + r.Dispatch("hello", map[routingKey][]program.Program{ + defautlRK: programs, + }) + + assertOps(t, []event{ + e(defautlRK, createOp), + e(defautlRK, executeOp), + }, recorder.events) + + recorder.reset() + + nk := "NEW_KEY" + r.Dispatch("hello-2", map[routingKey][]program.Program{ + nk: programs, + }) + + assertOps(t, []event{ + e(nk, createOp), + e(nk, executeOp), + e(defautlRK, closeOp), + }, recorder.events) + }) + + t.Run("multiples create new and destroy unused stream", func(t *testing.T) { + k1 := "KEY_1" + k2 := "KEY_2" + + recorder := &recorder{} + r, err := newRouter(nil, recorder.factory) + require.NoError(t, err) + r.Dispatch("hello", map[routingKey][]program.Program{ + defautlRK: programs, + k1: programs, + k2: programs, + }) + + assertOps(t, []event{ + e(defautlRK, createOp), + e(defautlRK, executeOp), + + e(k1, createOp), + e(k1, executeOp), + + e(k2, createOp), + e(k2, executeOp), + }, recorder.events) + + recorder.reset() + + nk := "SECOND_DISPATCH" + r.Dispatch("hello-2", map[routingKey][]program.Program{ + nk: programs, + }) + + assertOps(t, []event{ + e(nk, createOp), + e(nk, executeOp), + + e(defautlRK, closeOp), + e(k1, closeOp), + e(k2, closeOp), + }, recorder.events) + }) + + 
t.Run("create new and delegate program to existing stream", func(t *testing.T) { + recorder := &recorder{} + r, err := newRouter(nil, recorder.factory) + require.NoError(t, err) + r.Dispatch("hello", map[routingKey][]program.Program{ + defautlRK: programs, + }) + + assertOps(t, []event{ + e(defautlRK, createOp), + e(defautlRK, executeOp), + }, recorder.events) + + recorder.reset() + + r.Dispatch("hello-2", map[routingKey][]program.Program{ + defautlRK: programs, + }) + + assertOps(t, []event{ + e(defautlRK, executeOp), + }, recorder.events) + }) + + t.Run("when no stream are detected we shutdown all the running streams", func(t *testing.T) { + k1 := "KEY_1" + k2 := "KEY_2" + + recorder := &recorder{} + r, err := newRouter(nil, recorder.factory) + require.NoError(t, err) + r.Dispatch("hello", map[routingKey][]program.Program{ + defautlRK: programs, + k1: programs, + k2: programs, + }) + + assertOps(t, []event{ + e(defautlRK, createOp), + e(defautlRK, executeOp), + e(k1, createOp), + e(k1, executeOp), + e(k2, createOp), + e(k2, executeOp), + }, recorder.events) + + recorder.reset() + + r.Dispatch("hello-2", map[routingKey][]program.Program{}) + + assertOps(t, []event{ + e(defautlRK, closeOp), + e(k1, closeOp), + e(k2, closeOp), + }, recorder.events) + }) +} + +type recorder struct { + events []event +} + +func (r *recorder) factory(_ *logger.Logger, rk routingKey) (stream, error) { + return newMockStream(rk, r.notify), nil +} + +func (r *recorder) notify(rk routingKey, op rOp, args ...interface{}) { + r.events = append(r.events, e(rk, op)) +} + +func (r *recorder) reset() { + r.events = nil +} + +type mockStream struct { + rk routingKey + notify notifyFunc +} + +func newMockStream(rk routingKey, notify notifyFunc) *mockStream { + notify(rk, createOp) + return &mockStream{ + rk: rk, + notify: notify, + } +} + +func (m *mockStream) Execute(req *configRequest) error { + m.event(executeOp, req) + return nil +} + +func (m *mockStream) Close() error { + m.event(closeOp) + return nil +} + +func (m *mockStream) event(op rOp, args ...interface{}) { + m.notify(m.rk, op, args...) +} + +func assertOps(t *testing.T, expected []event, received []event) { + require.Equal(t, len(expected), len(received), "Received number of operation doesn't match") + require.Equal(t, expected, received) +} + +func e(rk routingKey, op rOp) event { + return event{rk: rk, op: op} +} diff --git a/x-pack/agent/pkg/agent/application/stream.go b/x-pack/agent/pkg/agent/application/stream.go new file mode 100644 index 00000000000..43334735840 --- /dev/null +++ b/x-pack/agent/pkg/agent/application/stream.go @@ -0,0 +1,99 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package application
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation"
+	operatorCfg "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/stateresolver"
+	downloader "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/localremote"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/install"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+// reporter is a consumer of application lifecycle events.
+type reporter interface {
+	OnStarting(ctx context.Context, app string)
+	OnRunning(ctx context.Context, app string)
+	OnFailing(ctx context.Context, app string, err error)
+	OnStopping(ctx context.Context, app string)
+	OnStopped(ctx context.Context, app string)
+	OnFatal(ctx context.Context, app string, err error)
+}
+
+type sender interface {
+	Send(
+		ctx context.Context,
+		method string,
+		path string,
+		params url.Values,
+		headers http.Header,
+		body io.Reader,
+	) (*http.Response, error)
+}
+
+type operatorStream struct {
+	configHandler ConfigHandler
+	log           *logger.Logger
+}
+
+func (b *operatorStream) Close() error {
+	return b.configHandler.HandleConfig(&configRequest{})
+}
+
+func (b *operatorStream) Execute(cfg *configRequest) error {
+	return b.configHandler.HandleConfig(cfg)
+}
+
+func streamFactory(ctx context.Context, cfg *config.Config, client sender, r reporter) func(*logger.Logger, routingKey) (stream, error) {
+	return func(log *logger.Logger, id routingKey) (stream, error) {
+		// new operator per stream to isolate processes without using tags
+		operator, err := newOperator(ctx, log, id, cfg, r)
+		if err != nil {
+			return nil, err
+		}
+
+		return &operatorStream{
+			log:           log,
+			configHandler: operator,
+		}, nil
+	}
+}
+
+func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *config.Config, r reporter) (*operation.Operator, error) {
+	operatorConfig := &operatorCfg.Config{}
+	if err := config.Unpack(&operatorConfig); err != nil {
+		return nil, err
+	}
+
+	fetcher := downloader.NewDownloader(operatorConfig.DownloadConfig)
+	installer, err := install.NewInstaller(operatorConfig.DownloadConfig)
+	if err != nil {
+		return nil, errors.New(err, "initiating installer")
+	}
+
+	stateResolver, err := stateresolver.NewStateResolver(log)
+	if err != nil {
+		return nil, err
+	}
+
+	return operation.NewOperator(
+		ctx,
+		log,
+		id,
+		config,
+		fetcher,
+		installer,
+		stateResolver,
+		r,
+	)
+}
diff --git a/x-pack/agent/pkg/agent/cmd/cmd_test.go b/x-pack/agent/pkg/agent/cmd/cmd_test.go
new file mode 100644
index 00000000000..fb0ba3975a7
--- /dev/null
+++ b/x-pack/agent/pkg/agent/cmd/cmd_test.go
@@ -0,0 +1,32 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package cmd + +import ( + "testing" +) + +func TestAgent(t *testing.T) { + // t.Run("test agent with subcommand", func(t *testing.T) { + // streams, _, _, _ := cli.NewTestingIOStreams() + // cmd := NewCommandWithArgs([]string{}, streams) + // cmd.SetOutput(streams.Out) + // cmd.Execute() + // }) + + // t.Run("test run subcommand", func(t *testing.T) { + // streams, _, out, _ := cli.NewTestingIOStreams() + // cmd := newRunCommandWithArgs(globalFlags{ + // PathConfigFile: filepath.Join("build", "agent.yml"), + // }, []string{}, streams) + // cmd.SetOutput(streams.Out) + // cmd.Execute() + // contents, err := ioutil.ReadAll(out) + // if !assert.NoError(t, err) { + // return + // } + // assert.True(t, strings.Contains(string(contents), "Hello I am running")) + // }) +} diff --git a/x-pack/agent/pkg/agent/cmd/common.go b/x-pack/agent/pkg/agent/cmd/common.go new file mode 100644 index 00000000000..7c8019f3c61 --- /dev/null +++ b/x-pack/agent/pkg/agent/cmd/common.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cmd + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/basecmd" + "github.com/elastic/beats/v7/x-pack/agent/pkg/cli" +) + +var defaultConfig = "agent.yml" + +type globalFlags struct { + PathConfigFile string + PathConfig string + PathData string + PathHome string + PathLogs string + FlagStrictPerms bool +} + +func (f *globalFlags) Config() string { + if len(f.PathConfigFile) == 0 { + return filepath.Join(f.PathHome, defaultConfig) + } + return f.PathConfigFile +} + +func (f *globalFlags) StrictPermission() bool { + return f.FlagStrictPerms +} + +// NewCommand returns the default command for the agent. +func NewCommand() *cobra.Command { + return NewCommandWithArgs(os.Args, cli.NewIOStreams()) +} + +// NewCommandWithArgs returns a new agent with the flags and the subcommand. +func NewCommandWithArgs(args []string, streams *cli.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "agent [subcommand]", + } + + flags := &globalFlags{} + + cmd.PersistentFlags().StringVarP(&flags.PathConfigFile, "", "c", defaultConfig, fmt.Sprintf(`Configuration file, relative to path.config (default "%s")`, defaultConfig)) + cmd.PersistentFlags().StringVarP(&flags.PathHome, "path.home", "", "", "Home path") + cmd.PersistentFlags().StringVarP(&flags.PathConfig, "path.config", "", "${path.home}", "Configuration path") + cmd.PersistentFlags().StringVarP(&flags.PathData, "path.data", "", "${path.home}/data", "Data path") + cmd.PersistentFlags().StringVarP(&flags.PathLogs, "path.logs", "", "${path.home}/logs", "Logs path") + cmd.PersistentFlags().BoolVarP(&flags.FlagStrictPerms, "strict.perms", "", true, "Strict permission checking on config files") + + // Add version. + cmd.AddCommand(basecmd.NewDefaultCommandsWithArgs(args, streams)...) + cmd.AddCommand(newRunCommandWithArgs(flags, args, streams)) + cmd.AddCommand(newEnrollCommandWithArgs(flags, args, streams)) + + return cmd +} diff --git a/x-pack/agent/pkg/agent/cmd/enroll.go b/x-pack/agent/pkg/agent/cmd/enroll.go new file mode 100644 index 00000000000..dd2cd71b0ca --- /dev/null +++ b/x-pack/agent/pkg/agent/cmd/enroll.go @@ -0,0 +1,113 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
+// you may not use this file except in compliance with the Elastic License.
+
+package cmd
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/spf13/cobra"
+
+	c "github.com/elastic/beats/v7/libbeat/common/cli"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/cli"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+var defaultDelay = 1 * time.Second
+
+func newEnrollCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStreams) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "enroll <url> <enrollment_token>",
+		Short: "Enroll the Agent into Fleet",
+		Long:  "This will enroll the Agent into Fleet.",
+		Args:  cobra.ExactArgs(2),
+		Run: func(c *cobra.Command, args []string) {
+			if err := enroll(streams, c, flags, args); err != nil {
+				fmt.Fprintf(streams.Err, "%v\n", err)
+				os.Exit(1)
+			}
+		},
+	}
+
+	cmd.Flags().StringP("certificate_authorities", "a", "", "Comma-separated list of root certificates used for server verification")
+	cmd.Flags().StringP("ca_sha256", "p", "", "Comma-separated list of certificate authority hash pins used for certificate verification")
+	cmd.Flags().BoolP("force", "f", false, "Force overwrite of the current settings without prompting for confirmation")
+
+	return cmd
+}
+
+func enroll(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, args []string) error {
+	config, err := config.LoadYAML(flags.PathConfigFile)
+	if err != nil {
+		return errors.New(err,
+			fmt.Sprintf("could not read configuration file %s", flags.PathConfigFile),
+			errors.TypeFilesystem,
+			errors.M(errors.MetaKeyPath, flags.PathConfigFile))
+	}
+
+	force, _ := cmd.Flags().GetBool("force")
+	if !force {
+		confirm, err := c.Confirm("This will replace your current settings. Do you want to continue?", true)
+		if err != nil {
+			return errors.New(err, "problem reading prompt response")
+		}
+		if !confirm {
+			fmt.Fprintln(streams.Out, "Enrollment was canceled by the user")
+			return nil
+		}
+	}
+
+	logger, err := logger.NewFromConfig(config)
+	if err != nil {
+		return err
+	}
+
+	url := args[0]
+	enrollmentToken := args[1]
+
+	caStr, _ := cmd.Flags().GetString("certificate_authorities")
+	CAs := cli.StringToSlice(caStr)
+
+	caSHA256str, _ := cmd.Flags().GetString("ca_sha256")
+	caSHA256 := cli.StringToSlice(caSHA256str)
+
+	delay(defaultDelay)
+
+	options := application.EnrollCmdOption{
+		ID:                   "", // TODO(ph), This should not be an empty string, will clarify in a new PR.
+		EnrollAPIKey:         enrollmentToken,
+		URL:                  url,
+		CAs:                  CAs,
+		CASha256:             caSHA256,
+		UserProvidedMetadata: make(map[string]interface{}),
+	}
+
+	c, err := application.NewEnrollCmd(
+		logger,
+		&options,
+		flags.PathConfigFile,
+	)
+
+	if err != nil {
+		return err
+	}
+
+	err = c.Execute()
+	if err != nil {
+		return errors.New(err, "failed to enroll")
+	}
+
+	fmt.Fprintln(streams.Out, "Successfully enrolled the Agent.")
+	return nil
+}
+
+// delay waits for a random duration of at most t, to spread out enrollment requests.
+func delay(t time.Duration) {
+	<-time.After(time.Duration(rand.Int63n(int64(t))))
+}
diff --git a/x-pack/agent/pkg/agent/cmd/run.go b/x-pack/agent/pkg/agent/cmd/run.go
new file mode 100644
index 00000000000..9bddb41f2e6
--- /dev/null
+++ b/x-pack/agent/pkg/agent/cmd/run.go
@@ -0,0 +1,65 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/spf13/cobra"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/application"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/cli"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+func newRunCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStreams) *cobra.Command {
+	return &cobra.Command{
+		Use:   "run",
+		Short: "Start the agent.",
+		Run: func(_ *cobra.Command, _ []string) {
+			if err := run(flags, streams); err != nil {
+				fmt.Fprintf(streams.Err, "%v\n", err)
+				os.Exit(1)
+			}
+		},
+	}
+}
+
+func run(flags *globalFlags, streams *cli.IOStreams) error {
+	config, err := config.LoadYAML(flags.PathConfigFile)
+	if err != nil {
+		return errors.New(err,
+			fmt.Sprintf("could not read configuration file %s", flags.PathConfigFile),
+			errors.TypeFilesystem,
+			errors.M(errors.MetaKeyPath, flags.PathConfigFile))
+	}
+
+	logger, err := logger.NewFromConfig(config)
+	if err != nil {
+		return err
+	}
+
+	app, err := application.New(logger, flags.PathConfigFile)
+	if err != nil {
+		return err
+	}
+
+	if err := app.Start(); err != nil {
+		return err
+	}
+
+	// listen for termination signals (note: SIGKILL cannot actually be trapped)
+	signals := make(chan os.Signal, 1)
+	signal.Notify(signals, syscall.SIGINT, syscall.SIGKILL, syscall.SIGTERM, syscall.SIGQUIT)
+
+	<-signals
+
+	return app.Stop()
+}
diff --git a/x-pack/agent/pkg/agent/configrequest/request.go b/x-pack/agent/pkg/agent/configrequest/request.go
new file mode 100644
index 00000000000..2c29513ed76
--- /dev/null
+++ b/x-pack/agent/pkg/agent/configrequest/request.go
@@ -0,0 +1,18 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package configrequest
+
+import (
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program"
+)
+
+// Request is the minimal interface a config request must have.
+type Request interface {
+	ID() string
+	CreatedAt() time.Time
+	Programs() []program.Program
+}
diff --git a/x-pack/agent/pkg/agent/configrequest/step.go b/x-pack/agent/pkg/agent/configrequest/step.go
new file mode 100644
index 00000000000..ea96090ec55
--- /dev/null
+++ b/x-pack/agent/pkg/agent/configrequest/step.go
@@ -0,0 +1,31 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package configrequest
+
+const (
+	// StepRun is the name of the step that runs (starts) a program.
+	StepRun = "sc-run"
+	// StepRemove is the name of the step that causes a program in a given version to be uninstalled.
+	StepRemove = "sc-remove"
+
+	// MetaConfigKey is the key used to store a program's configuration in the step metadata.
+	MetaConfigKey = "config"
+)
+
+// Step is a single step to be applied.
+type Step struct {
+	// ID identifies the kind of operation to be executed.
+	ID string
+	// Version is the version of the program.
+	Version string
+	// Process defines a process such as `filebeat`.
+	Process string
+	// Meta contains additional data such as version, configuration or tags.
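+	// For example (illustrative values only), a run step for filebeat might
+	// carry its program configuration under MetaConfigKey:
+	//
+	//	Step{ID: StepRun, Version: "8.0.0", Process: "filebeat",
+	//		Meta: map[string]interface{}{MetaConfigKey: map[string]interface{}{ /* program config */ }}}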
+	Meta map[string]interface{}
+}
+
+func (s *Step) String() string {
+	return "[ID:" + s.ID + ", PROCESS:" + s.Process + ", VERSION:" + s.Version + "]"
+}
diff --git a/x-pack/agent/pkg/agent/errors/error.go b/x-pack/agent/pkg/agent/errors/error.go
new file mode 100644
index 00000000000..c972ed1f032
--- /dev/null
+++ b/x-pack/agent/pkg/agent/errors/error.go
@@ -0,0 +1,103 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package errors
+
+import "github.com/pkg/errors"
+
+// MetaRecord is an entry of metadata enhancing an error.
+type MetaRecord struct {
+	key string
+	val interface{}
+}
+
+// Error is an interface defining a custom agent error.
+type Error interface {
+	Error() string
+	Type() ErrorType
+	ReadableType() string
+	Meta() map[string]interface{}
+}
+
+type agentError struct {
+	msg     string
+	err     error
+	errType ErrorType
+	meta    map[string]interface{}
+}
+
+// Error returns a string consisting of a message and the originating error.
+func (e agentError) Error() string {
+	if e.msg != "" {
+		return errors.Wrap(e.err, e.msg).Error()
+	}
+
+	return e.err.Error()
+}
+
+// Type recursively checks wrapped errors and returns the first known,
+// non-default error type.
+func (e agentError) Type() ErrorType {
+	if e.errType != 0 {
+		return e.errType
+	}
+
+	if e.err == nil {
+		return TypeUnexpected
+	}
+
+	inner, ok := e.err.(Error)
+	if causeErr := errors.Cause(e.err); !ok && causeErr == e.err {
+		return TypeUnexpected
+	} else if !ok {
+		// err is wrapped
+		customCause := New(causeErr).(Error)
+		return customCause.Type()
+	}
+
+	return inner.Type()
+}
+
+// ReadableType recursively checks wrapped errors for the first known,
+// non-default error type and returns its readable representation.
+func (e agentError) ReadableType() string {
+	etype := e.Type()
+	if hrt, found := readableTypes[etype]; found {
+		return hrt
+	}
+
+	return "UNEXPECTED"
+}
+
+func (e agentError) Meta() map[string]interface{} {
+	inner, ok := e.err.(Error)
+	if causeErr := errors.Cause(e.err); !ok && causeErr == e.err {
+		return e.meta
+	} else if !ok {
+		inner = New(causeErr).(Error)
+	}
+
+	innerMeta := inner.Meta()
+	resultingMeta := make(map[string]interface{})
+
+	// copy so we don't modify values
+	for k, v := range e.meta {
+		resultingMeta[k] = v
+	}
+
+	for k, v := range innerMeta {
+		if _, found := resultingMeta[k]; found {
+			continue
+		}
+
+		resultingMeta[k] = v
+	}
+
+	return resultingMeta
+}
+
+// Check it implements Error
+var _ Error = agentError{}
+
+// Check it implements error
+var _ error = agentError{}
diff --git a/x-pack/agent/pkg/agent/errors/error_test.go b/x-pack/agent/pkg/agent/errors/error_test.go
new file mode 100644
index 00000000000..8b764f48ee5
--- /dev/null
+++ b/x-pack/agent/pkg/agent/errors/error_test.go
@@ -0,0 +1,194 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package errors
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/pkg/errors"
+)
+
+func TestErrorsWrap(t *testing.T) {
+	ce := New("custom error", TypePath, M("k", "v"))
+	ew := errors.Wrap(ce, "wrapper")
+	outer := New(ew)
+
+	outerCustom, ok := outer.(Error)
+	if !ok {
+		t.Error("expected Error")
+		return
+	}
+
+	if tt := outerCustom.Type(); tt != TypePath {
+		t.Errorf("expected type Path got %v", tt)
+	}
+
+	meta := outerCustom.Meta()
+	if _, found := meta["k"]; !found {
+		t.Errorf("expected meta with key 'k' but not found")
+	}
+}
+
+func TestErrors(t *testing.T) {
+	type testCase struct {
+		id                   string
+		expectedType         ErrorType
+		expectedReadableType string
+		expectedError        string
+		expectedMeta         map[string]interface{}
+		args                 []interface{}
+	}
+
+	cases := []testCase{
+		testCase{"custom message", TypeUnexpected, "UNEXPECTED", "msg1: err1", nil, []interface{}{fmt.Errorf("err1"), "msg1"}},
+		testCase{"no message", TypeUnexpected, "UNEXPECTED", "err1", nil, []interface{}{fmt.Errorf("err1")}},
+
+		testCase{"custom type (crash)", TypeApplicationCrash, "CRASH", "msg1: err1", nil, []interface{}{fmt.Errorf("err1"), "msg1", TypeApplicationCrash}},
+		testCase{"custom type (config)", TypeConfig, "CONFIG", "msg1: err1", nil, []interface{}{fmt.Errorf("err1"), "msg1", TypeConfig}},
+		testCase{"custom type (path)", TypePath, "PATH", "msg1: err1", nil, []interface{}{fmt.Errorf("err1"), "msg1", TypePath}},
+
+		testCase{"meta simple", TypeUnexpected, "UNEXPECTED", "msg1: err1", map[string]interface{}{"a": 1}, []interface{}{fmt.Errorf("err1"), "msg1", M("a", 1)}},
+		testCase{"meta two keys", TypeUnexpected, "UNEXPECTED", "msg1: err1", map[string]interface{}{"a": 1, "b": 21}, []interface{}{fmt.Errorf("err1"), "msg1", M("a", 1), M("b", 21)}},
+		testCase{"meta overriding key", TypeUnexpected, "UNEXPECTED", "msg1: err1", map[string]interface{}{"a": 21}, []interface{}{fmt.Errorf("err1"), "msg1", M("a", 1), M("a", 21)}},
+
+		testCase{"overriding custom message", TypeUnexpected, "UNEXPECTED", "msg2: err1", nil, []interface{}{fmt.Errorf("err1"), "msg1", "msg2"}},
+		testCase{"overriding custom type (crash)", TypeApplicationCrash, "CRASH", "msg1: err1", nil, []interface{}{fmt.Errorf("err1"), "msg1", TypeConfig, TypeApplicationCrash}},
+		testCase{"overriding error", TypeUnexpected, "UNEXPECTED", "err2", nil, []interface{}{fmt.Errorf("err1"), fmt.Errorf("err2")}},
+	}
+
+	for _, tc := range cases {
+		actualErr := New(tc.args...)
+		agentErr, ok := actualErr.(Error)
+		if !ok {
+			t.Errorf("[%s] expected Error", tc.id)
+			continue
+		}
+
+		if e := agentErr.Error(); e != tc.expectedError {
+			t.Errorf("[%s] expected error: '%s', got '%s'", tc.id, tc.expectedError, e)
+		}
+		if e := agentErr.Type(); e != tc.expectedType {
+			t.Errorf("[%s] expected error type: '%v', got '%v'", tc.id, tc.expectedType, e)
+		}
+		if e := agentErr.ReadableType(); e != tc.expectedReadableType {
+			t.Errorf("[%s] expected error readable type: '%v', got '%v'", tc.id, tc.expectedReadableType, e)
+		}
+
+		if e := agentErr.Meta(); len(e) != len(tc.expectedMeta) {
+			t.Errorf("[%s] expected meta length: '%v', got '%v'", tc.id, len(tc.expectedMeta), len(e))
+		}
+
+		if len(tc.expectedMeta) != 0 {
+			e := agentErr.Meta()
+			for ek, ev := range tc.expectedMeta {
+				v, found := e[ek]
+				if !found {
+					t.Errorf("[%s] expected meta key: '%v' not found", tc.id, ek)
+				}
+
+				if ev != v {
+					t.Errorf("[%s] expected meta value for key: '%v' not equal. Expected: '%v', got: '%v'", tc.id, ek, ev, v)
+				}
+			}
+		}
+	}
+}
+
+func TestNoErrorNoMsg(t *testing.T) {
+	actualErr := New()
+	agentErr, ok := actualErr.(Error)
+	if !ok {
+		t.Error("expected Error")
+		return
+	}
+
+	e := agentErr.Error()
+	if !strings.Contains(e, "error_test.go[") {
+		t.Errorf("Error does not contain source file: %v", e)
+	}
+
+	if !strings.HasSuffix(e, ": unknown error") {
+		t.Errorf("Error does not contain default error: %v", e)
+	}
+}
+
+func TestNoError(t *testing.T) {
+	// test with message
+	msg := "msg2"
+	actualErr := New(msg)
+	agentErr, ok := actualErr.(Error)
+	if !ok {
+		t.Error("expected Error")
+		return
+	}
+
+	e := agentErr.Error()
+	if !strings.Contains(e, "error_test.go[") {
+		t.Errorf("Error does not contain source file: %v", e)
+	}
+
+	if !strings.HasSuffix(e, ": unknown error") {
+		t.Errorf("Error does not contain default error: %v", e)
+	}
+
+	if !strings.HasPrefix(e, msg) {
+		t.Errorf("Error does not contain provided message: %v", e)
+	}
+}
+
+func TestMetaFold(t *testing.T) {
+	err1 := fmt.Errorf("level1")
+	err2 := New("level2", err1, M("key1", "level2"), M("key2", "level2"))
+	err3 := New("level3", err2, M("key1", "level3"), M("key3", "level3"))
+	err4 := New("level4", err3)
+
+	resultingErr, ok := err4.(Error)
+	if !ok {
+		t.Fatal("error is not Error")
+	}
+
+	meta := resultingErr.Meta()
+	expectedMeta := map[string]interface{}{
+		"key1": "level3",
+		"key2": "level2",
+		"key3": "level3",
+	}
+
+	if len(expectedMeta) != len(meta) {
+		t.Fatalf("Metadata do not match expected '%v' got '%v'", expectedMeta, meta)
+	}
+
+	for ek, ev := range expectedMeta {
+		v, found := meta[ek]
+		if !found {
+			t.Errorf("Key '%s' not found in a meta collection", ek)
+			continue
+		}
+
+		if v != ev {
+			t.Errorf("Values for key '%s' don't match. Expected: '%v', got '%v'", ek, ev, v)
+		}
+	}
+}
+
+func TestMetaCallDoesNotModifyCollection(t *testing.T) {
+	err1 := fmt.Errorf("level1")
+	err2 := New("level2", err1, M("key1", "level2"), M("key2", "level2"))
+	err3 := New("level3", err2, M("key1", "level3"), M("key3", "level3"))
+	err4 := New("level4", err3)
+
+	resultingErr, ok := err4.(agentError)
+	if !ok {
+		t.Fatal("error is not an agentError")
+	}
+
+	resultingErr.Meta()
+
+	if len(resultingErr.meta) != 0 {
+		t.Fatalf("err4.meta modified by calling Meta(): %v", resultingErr.meta)
+	}
+}
diff --git a/x-pack/agent/pkg/agent/errors/generators.go b/x-pack/agent/pkg/agent/errors/generators.go
new file mode 100644
index 00000000000..26a067f4ce8
--- /dev/null
+++ b/x-pack/agent/pkg/agent/errors/generators.go
@@ -0,0 +1,55 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package errors
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/pkg/errors"
+)
+
+// M creates a meta entry for an error
+func M(key string, val interface{}) MetaRecord {
+	return MetaRecord{key: key,
+		val: val,
+	}
+}
+
+// New constructs an Agent Error based on the provided parameters.
+// Accepts:
+// - string for the error message [0..1]
+// - error for the inner error [0..1]
+// - ErrorType for defining the type [0..1]
+// - MetaRecords for enhancing the error with metadata [0..*]
+// If an optional argument is provided more than once (message, error, type), the
+// last argument overwrites the previous ones.
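+//
+// A minimal illustrative call (the values here are hypothetical):
+//
+//	err := New("could not read configuration", os.ErrNotExist, TypeFilesystem, M(MetaKeyPath, "agent.yml"))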
+func New(args ...interface{}) error {
+	agentErr := agentError{}
+	agentErr.meta = make(map[string]interface{})
+
+	for _, arg := range args {
+		switch arg := arg.(type) {
+		case string:
+			agentErr.msg = arg
+		case error:
+			agentErr.err = arg
+		case ErrorType:
+			agentErr.errType = arg
+		case MetaRecord:
+			agentErr.meta[arg.key] = arg.val
+		}
+	}
+
+	if agentErr.err == nil {
+		agentErr.err = errors.New("unknown error")
+
+		if _, file, line, ok := runtime.Caller(1); ok {
+			agentErr.err = errors.Wrapf(agentErr.err, fmt.Sprintf("%s[%d]", file, line))
+		}
+	}
+
+	return agentErr
+}
diff --git a/x-pack/agent/pkg/agent/errors/types.go b/x-pack/agent/pkg/agent/errors/types.go
new file mode 100644
index 00000000000..7bf3702bd20
--- /dev/null
+++ b/x-pack/agent/pkg/agent/errors/types.go
@@ -0,0 +1,49 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package errors
+
+// ErrorType defines the type of an agent error.
+type ErrorType int
+
+const (
+	// TypeUnexpected is the default error type for errors without a specified type.
+	TypeUnexpected ErrorType = iota
+	// TypeConfig is a configuration error.
+	TypeConfig
+	// TypePath is an invalid path error.
+	TypePath
+	// TypeApplication describes errors related to application state.
+	TypeApplication
+	// TypeApplicationCrash describes an unexpected application crash.
+	TypeApplicationCrash
+	// TypeNetwork represents a collection of errors related to networking.
+	TypeNetwork
+	// TypeFilesystem represents a set of errors generated by filesystem operations.
+	TypeFilesystem
+	// TypeSecurity represents a set of errors related to security, encryption, etc.
+	TypeSecurity
+)
+
+const (
+	// MetaKeyPath is a metadata key used in filesystem errors.
+	MetaKeyPath = "path"
+	// MetaKeyURI is a metadata key used in network-related errors.
+	MetaKeyURI = "uri"
+	// MetaKeyAppID is a metadata key used to identify the application related to an error.
+	MetaKeyAppID = "app_id"
+	// MetaKeyAppName is a metadata key used to specify the application name related to an error.
+	MetaKeyAppName = "app_name"
+)
+
+var readableTypes = map[ErrorType]string{
+	TypeUnexpected:       "UNEXPECTED",
+	TypeConfig:           "CONFIG",
+	TypePath:             "PATH",
+	TypeApplicationCrash: "CRASH",
+	TypeApplication:      "APPLICATION",
+	TypeNetwork:          "NETWORK",
+	TypeFilesystem:       "FILESYSTEM",
+	TypeSecurity:         "SECURITY",
+}
diff --git a/x-pack/agent/pkg/agent/internal/yamltest/yaml.go b/x-pack/agent/pkg/agent/internal/yamltest/yaml.go
new file mode 100644
index 00000000000..c766448550a
--- /dev/null
+++ b/x-pack/agent/pkg/agent/internal/yamltest/yaml.go
@@ -0,0 +1,54 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package yamltest
+
+import (
+	"gopkg.in/yaml.v2"
+)
+
+// FromYAML reads a byte slice and returns a map[string]interface{}.
+// NOTE: the YAML parser (v2 and v3) doesn't handle maps the way you might expect: it doesn't produce
+// map[string]interface{} when parsing a document, it uses map[interface{}]interface{} instead.
+// In the following expression, the key on the left is actually a bool and not a string.
+//
+// false: "awesome"
+func FromYAML(in []byte, out *map[string]interface{}) error {
+	var readTo map[interface{}]interface{}
+	if err := yaml.Unmarshal(in, &readTo); err != nil {
+		return err
+	}
+
+	*out = cleanMap(readTo)
+
+	return nil
+}
+
+func cleanSlice(in []interface{}) []interface{} {
+	result := make([]interface{}, len(in))
+	for i, v := range in {
+		result[i] = cleanValue(v)
+	}
+	return result
+}
+
+func cleanMap(in map[interface{}]interface{}) map[string]interface{} {
+	result := make(map[string]interface{})
+	for k, v := range in {
+		// keys are expected to be strings here; a non-string key (e.g. a bare
+		// boolean, as in the note above) will make this assertion panic
+		key := k.(string)
+		result[key] = cleanValue(v)
+	}
+	return result
+}
+
+func cleanValue(v interface{}) interface{} {
+	switch v := v.(type) {
+	case []interface{}:
+		return cleanSlice(v)
+	case map[interface{}]interface{}:
+		return cleanMap(v)
+	default:
+		return v
+	}
+}
diff --git a/x-pack/agent/pkg/agent/operation/config/config.go b/x-pack/agent/pkg/agent/operation/config/config.go
new file mode 100644
index 00000000000..78e4ed78ee7
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/config/config.go
@@ -0,0 +1,22 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package config
+
+import (
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/retry"
+)
+
+// Config is an operator configuration
+type Config struct {
+	ProcessConfig *process.Config `yaml:"process" config:"process"`
+	RetryConfig   *retry.Config   `yaml:"retry" config:"retry"`
+
+	DownloadConfig *artifact.Config `yaml:"download" config:"download"`
+
+	MonitoringConfig *monitoring.Config `yaml:"monitoring" config:"monitoring"`
+}
diff --git a/x-pack/agent/pkg/agent/operation/event_processor.go b/x-pack/agent/pkg/agent/operation/event_processor.go
new file mode 100644
index 00000000000..ecbb98960cb
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/event_processor.go
@@ -0,0 +1,26 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import "context"
+
+// callbackHooks defines hooks invoked on application lifecycle events.
+type callbackHooks interface {
+	OnStarting(ctx context.Context, app string)
+	OnRunning(ctx context.Context, app string)
+	OnFailing(ctx context.Context, app string, err error)
+	OnStopping(ctx context.Context, app string)
+	OnStopped(ctx context.Context, app string)
+	OnFatal(ctx context.Context, app string, err error)
+}
+
+type noopCallbackHooks struct{}
+
+func (*noopCallbackHooks) OnStarting(ctx context.Context, app string)           {}
+func (*noopCallbackHooks) OnRunning(ctx context.Context, app string)            {}
+func (*noopCallbackHooks) OnFailing(ctx context.Context, app string, err error) {}
+func (*noopCallbackHooks) OnStopping(ctx context.Context, app string)           {}
+func (*noopCallbackHooks) OnStopped(ctx context.Context, app string)            {}
+func (*noopCallbackHooks) OnFatal(ctx context.Context, app string, err error)   {}
diff --git a/x-pack/agent/pkg/agent/operation/monitoring.go b/x-pack/agent/pkg/agent/operation/monitoring.go
new file mode 100644
index 00000000000..bd988aad477
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/monitoring.go
@@ -0,0 +1,273 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"github.com/hashicorp/go-multierror"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring"
+)
+
+const (
+	monitoringName          = "FLEET_MONITORING"
+	monitoringKey           = "monitoring"
+	outputKey               = "output"
+	monitoringEnabledSubkey = "enabled"
+)
+
+func (o *Operator) handleStartSidecar(s configrequest.Step) (result error) {
+	cfg, err := getConfigFromStep(s)
+	if err != nil {
+		return errors.New(err,
+			errors.TypeConfig,
+			"operator.handleStartSidecar failed to retrieve config from step")
+	}
+
+	// if monitoring is disabled and running, stop it
+	if isEnabled := isMonitoringEnabled(o.logger, cfg); !isEnabled {
+		if o.isMonitoring {
+			o.logger.Info("operator.handleStartSidecar: monitoring is running and disabled, proceeding to stop")
+			return o.handleStopSidecar(s)
+		}
+
+		o.logger.Info("operator.handleStartSidecar: monitoring is not running and disabled, no action taken")
+		return nil
+	}
+
+	o.isMonitoring = true
+
+	for _, step := range o.getMonitoringSteps(s) {
+		p, cfg, err := getProgramFromStepWithTags(step, o.config.DownloadConfig, monitoringTags())
+		if err != nil {
+			return errors.New(err,
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, step.Process),
+				"operator.handleStartSidecar failed to create program")
+		}
+
+		// best effort on starting monitoring; if no hosts are provided, stop the sidecar and spare the resources
+		if step.ID == configrequest.StepRemove {
+			if err := o.stop(p); err != nil {
+				result = multierror.Append(result, err)
+			}
+		} else {
+			if err := o.start(p, cfg); err != nil {
+				result = multierror.Append(result, err)
+			}
+		}
+	}
+
+	return result
+}
+
+func (o *Operator) handleStopSidecar(s configrequest.Step) (result error) {
+	for _, step := range o.getMonitoringSteps(s) {
+		p, _, err := getProgramFromStepWithTags(step, o.config.DownloadConfig, monitoringTags())
+		if err != nil {
+			return errors.New(err,
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, step.Process),
+				"operator.handleStopSidecar failed to create program")
+		}
+
+		if err := o.stop(p); err != nil {
+			result = multierror.Append(result, err)
+		}
+	}
+
+	// if result != nil, something might still be running; keep isMonitoring set
+	// so a future call can still tear it down
+	if result == nil {
+		o.isMonitoring = false
+	}
+
+	return result
+}
+
+func monitoringTags() map[app.Tag]string {
+	return map[app.Tag]string{
+		app.TagSidecar: "true",
+	}
+}
+
+func isMonitoringEnabled(logger *logger.Logger, cfg map[string]interface{}) bool {
+	monitoringVal, found := cfg[monitoringKey]
+	if !found {
+		logger.Error("operator.isMonitoringEnabled: monitoring not found in config")
+		return false
+	}
+
+	monitoringMap, ok := monitoringVal.(map[string]interface{})
+	if !ok {
+		logger.Error("operator.isMonitoringEnabled: monitoring not a map")
+		return false
+	}
+
+	enabledVal, found := monitoringMap[monitoringEnabledSubkey]
+	if !found {
+		logger.Infof("operator.isMonitoringEnabled: monitoring.enabled key not found: %v", monitoringMap)
+		return false
+	}
+
+	enabled, ok := enabledVal.(bool)
+
+	return enabled && ok
+}
+
+func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.Step {
+	// get output
+	config, err := getConfigFromStep(step)
+	if err != nil {
+		o.logger.Errorf("operator.getMonitoringSteps: getting config from step failed: %v", err)
+		return nil
+	}
+
+	outputIface, found := config[outputKey]
+	if !found {
+		o.logger.Errorf("operator.getMonitoringSteps: monitoring configuration not found for sidecar of type %s", step.Process)
+		return nil
+	}
+
+	outputMap, ok := outputIface.(map[string]interface{})
+	if !ok {
+		o.logger.Error("operator.getMonitoringSteps: monitoring config is not a map")
+		return nil
+	}
+
+	output, found := outputMap["elasticsearch"]
+	if !found {
+		o.logger.Errorf("operator.getMonitoringSteps: monitoring is missing an elasticsearch output configuration for sidecar of type: %s", step.Process)
+		return nil
+	}
+
+	return o.generateMonitoringSteps(o.config.MonitoringConfig, step.Version, output)
+}
+
+func (o *Operator) generateMonitoringSteps(cfg *monitoring.Config, version string, output interface{}) []configrequest.Step {
+	var steps []configrequest.Step
+
+	if cfg.MonitorLogs {
+		fbConfig, any := o.getMonitoringFilebeatConfig(output)
+		stepID := configrequest.StepRun
+		if !any {
+			stepID = configrequest.StepRemove
+		}
+		filebeatStep := configrequest.Step{
+			ID:      stepID,
+			Version: version,
+			Process: "filebeat",
+			Meta: map[string]interface{}{
+				configrequest.MetaConfigKey: fbConfig,
+			},
+		}
+
+		steps = append(steps, filebeatStep)
+	}
+
+	if cfg.MonitorMetrics {
+		mbConfig, any := o.getMonitoringMetricbeatConfig(output)
+		stepID := configrequest.StepRun
+		if !any {
+			stepID = configrequest.StepRemove
+		}
+
+		metricbeatStep := configrequest.Step{
+			ID:      stepID,
+			Version: version,
+			Process: "metricbeat",
+			Meta: map[string]interface{}{
+				configrequest.MetaConfigKey: mbConfig,
+			},
+		}
+
+		steps = append(steps, metricbeatStep)
+	}
+
+	return steps
+}
+
+func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]interface{}, bool) {
+	paths := o.getLogFilePaths()
+	if len(paths) == 0 {
+		return nil, false
+	}
+
+	result := map[string]interface{}{
+		"filebeat": map[string]interface{}{
+			"inputs": []interface{}{
+				map[string]interface{}{
+					"type":  "log",
+					"paths": paths,
+				},
+			},
+		},
+		"output": map[string]interface{}{
+ "elasticsearch": output, + }, + } + + return result, true +} + +func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string]interface{}, bool) { + hosts := o.getMetricbeatEndpoints() + if len(hosts) == 0 { + return nil, false + } + + result := map[string]interface{}{ + "metricbeat": map[string]interface{}{ + "modules": []interface{}{ + map[string]interface{}{ + "module": "beat", + "metricsets": []string{"stats", "state"}, + "period": "10s", + "hosts": hosts, + }, + }, + }, + "output": map[string]interface{}{ + "elasticsearch": output, + }, + } + + return result, true +} + +func (o *Operator) getLogFilePaths() []string { + var paths []string + + o.appsLock.Lock() + defer o.appsLock.Unlock() + + for _, a := range o.apps { + logPath := a.Monitor().LogPath() + if logPath != "" { + paths = append(paths, logPath) + } + } + + return paths +} + +func (o *Operator) getMetricbeatEndpoints() []string { + var endpoints []string + + o.appsLock.Lock() + defer o.appsLock.Unlock() + + for _, a := range o.apps { + metricEndpoint := a.Monitor().MetricsPathPrefixed() + if metricEndpoint != "" { + endpoints = append(endpoints, metricEndpoint) + } + } + + return endpoints +} diff --git a/x-pack/agent/pkg/agent/operation/monitoring_test.go b/x-pack/agent/pkg/agent/operation/monitoring_test.go new file mode 100644 index 00000000000..8ceabd6220a --- /dev/null +++ b/x-pack/agent/pkg/agent/operation/monitoring_test.go @@ -0,0 +1,151 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build linux darwin + +package operation + +import ( + "context" + "testing" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest" + operatorCfg "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/stateresolver" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring/beats" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/retry" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/state" +) + +func TestGenerateSteps(t *testing.T) { + const sampleOutput = "sample-output" + operator, _ := getMonitorableTestOperator(t, "tests/scripts") + + type testCase struct { + Name string + Config *monitoring.Config + ExpectedSteps int + FilebeatStep bool + MetricbeatStep bool + } + + testCases := []testCase{ + testCase{"NO monitoring", &monitoring.Config{MonitorLogs: false, MonitorMetrics: false}, 0, false, false}, + testCase{"FB monitoring", &monitoring.Config{MonitorLogs: true, MonitorMetrics: false}, 1, true, false}, + testCase{"MB monitoring", &monitoring.Config{MonitorLogs: false, MonitorMetrics: true}, 1, false, true}, + testCase{"ALL monitoring", &monitoring.Config{MonitorLogs: true, MonitorMetrics: true}, 2, true, true}, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + steps := operator.generateMonitoringSteps(tc.Config, "8.0", sampleOutput) + if actualSteps := len(steps); actualSteps != tc.ExpectedSteps { + t.Fatalf("invalid number of steps, expected %v, got %v", tc.ExpectedSteps, actualSteps) + } + + var 
+			var fbFound, mbFound bool
+			for _, s := range steps {
+				// Filebeat step check
+				if s.Process == "filebeat" {
+					fbFound = true
+					checkStep(t, "filebeat", sampleOutput, s)
+				}
+
+				// Metricbeat step check
+				if s.Process == "metricbeat" {
+					mbFound = true
+					checkStep(t, "metricbeat", sampleOutput, s)
+				}
+			}
+
+			if tc.FilebeatStep != fbFound {
+				t.Fatalf("Steps for filebeat do not match. Expected: %v, found: %v", tc.FilebeatStep, fbFound)
+			}
+
+			if tc.MetricbeatStep != mbFound {
+				t.Fatalf("Steps for metricbeat do not match. Expected: %v, found: %v", tc.MetricbeatStep, mbFound)
+			}
+		})
+	}
+}
+
+func checkStep(t *testing.T, stepName string, expectedOutput interface{}, s configrequest.Step) {
+	if meta := s.Meta[configrequest.MetaConfigKey]; meta != nil {
+		mapstr, ok := meta.(map[string]interface{})
+		if !ok {
+			t.Fatalf("no meta config for %s step", stepName)
+		}
+
+		esOut, ok := mapstr["output"].(map[string]interface{})
+		if !ok {
+			t.Fatalf("output not found for %s step", stepName)
+		}
+
+		if actualOutput := esOut["elasticsearch"]; actualOutput != expectedOutput {
+			t.Fatalf("output for %s step does not match. expected: %v, got %v", stepName, expectedOutput, actualOutput)
+		}
+	}
+}
+
+func getMonitorableTestOperator(t *testing.T, installPath string) (*Operator, *operatorCfg.Config) {
+	operatorConfig := &operatorCfg.Config{
+		RetryConfig: &retry.Config{
+			Enabled:      true,
+			RetriesCount: 2,
+			Delay:        3 * time.Second,
+			MaxDelay:     10 * time.Second,
+		},
+		ProcessConfig: &process.Config{},
+		DownloadConfig: &artifact.Config{
+			InstallPath:     installPath,
+			OperatingSystem: "darwin",
+		},
+		MonitoringConfig: &monitoring.Config{
+			MonitorMetrics: true,
+		},
+	}
+
+	cfg, err := config.NewConfigFrom(operatorConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	l := getLogger()
+
+	fetcher := &DummyDownloader{}
+	installer := &DummyInstaller{}
+
+	stateResolver, err := stateresolver.NewStateResolver(l)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ctx := context.Background()
+	operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, installer, stateResolver, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	monitor := beats.NewMonitor("dummy", "p1234", &artifact.Config{OperatingSystem: "linux", InstallPath: "/install/path"}, true, true)
+	operator.apps["dummy"] = &testMonitorableApp{monitor: monitor}
+
+	return operator, operatorConfig
+}
+
+type testMonitorableApp struct {
+	monitor *beats.Monitor
+}
+
+func (*testMonitorableApp) Name() string                                              { return "" }
+func (*testMonitorableApp) Start(_ context.Context, cfg map[string]interface{}) error { return nil }
+func (*testMonitorableApp) Stop()                                                     {}
+func (*testMonitorableApp) Configure(_ context.Context, config map[string]interface{}) error {
+	return nil
+}
+func (*testMonitorableApp) State() state.State             { return state.State{} }
+func (a *testMonitorableApp) Monitor() monitoring.Monitor  { return a.monitor }
diff --git a/x-pack/agent/pkg/agent/operation/operation.go b/x-pack/agent/pkg/agent/operation/operation.go
new file mode 100644
index 00000000000..b18fab74db1
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation.go
@@ -0,0 +1,48 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/state"
+)
+
+// operation is an operation definition.
+// Each operation needs to implement this interface in order
+// to make rollbacks easier.
+type operation interface {
+	// Name is a human-readable name which identifies the operation.
+	Name() string
+	// Check checks whether the operation needs to be run.
+	// If prerequisites (such as an invalid cert or a tweaked binary) are not met, it returns an error.
+	// examples:
+	// - Start does not need to run if the process is running
+	// - Fetch does not need to run if the package is already present
	Check() (bool, error)
+	// Run runs the operation
+	Run(ctx context.Context, application Application) error
+}
+
+// Application is an application capable of being started, stopped and configured.
+type Application interface {
+	Name() string
+	Start(ctx context.Context, cfg map[string]interface{}) error
+	Stop()
+	Configure(ctx context.Context, config map[string]interface{}) error
+	State() state.State
+	Monitor() monitoring.Monitor
+}
+
+// Descriptor defines a program which needs to be run.
+// It is passed around operator operations.
+type Descriptor interface {
+	BinaryName() string
+	Version() string
+	ID() string
+	Directory() string
+	IsGrpcConfigurable() bool
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_config.go b/x-pack/agent/pkg/agent/operation/operation_config.go
new file mode 100644
index 00000000000..92722eba0dc
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_config.go
@@ -0,0 +1,67 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+var (
+	// ErrClientNotFound is returned when a client is not found.
+	ErrClientNotFound = errors.New("client not found, check if process is running")
+	// ErrClientNotConfigurable is returned when a stored client does not implement the Config function.
+	ErrClientNotConfigurable = errors.New("client does not provide configuration")
+)
+
+// operationConfig configures a running process by sending a configuration
+// to its gRPC endpoint.
+type operationConfig struct {
+	logger         *logger.Logger
+	operatorConfig *config.Config
+	cfg            map[string]interface{}
+	eventProcessor callbackHooks
+}
+
+func newOperationConfig(
+	logger *logger.Logger,
+	operatorConfig *config.Config,
+	cfg map[string]interface{},
+	eventProcessor callbackHooks) *operationConfig {
+	return &operationConfig{
+		logger:         logger,
+		operatorConfig: operatorConfig,
+		cfg:            cfg,
+		eventProcessor: eventProcessor,
+	}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationConfig) Name() string {
+	return "operation-config"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationConfig) Check() (bool, error) { return true, nil }
+
+// Run runs the operation
+func (o *operationConfig) Run(ctx context.Context, application Application) (err error) {
+	defer func() {
+		if err != nil {
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+		}
+	}()
+	return application.Configure(ctx, o.cfg)
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_fetch.go b/x-pack/agent/pkg/agent/operation/operation_fetch.go
new file mode 100644
index 00000000000..df1556544f2
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_fetch.go
@@ -0,0 +1,87 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+	"os"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+// operationFetch fetches an artifact from the preconfigured source;
+// skipped if the artifact is already downloaded.
+type operationFetch struct {
+	logger         *logger.Logger
+	program        Descriptor
+	operatorConfig *config.Config
+	downloader     download.Downloader
+	eventProcessor callbackHooks
+}
+
+func newOperationFetch(
+	logger *logger.Logger,
+	program Descriptor,
+	operatorConfig *config.Config,
+	downloader download.Downloader,
+	eventProcessor callbackHooks) *operationFetch {
+
+	return &operationFetch{
+		logger:         logger,
+		program:        program,
+		operatorConfig: operatorConfig,
+		downloader:     downloader,
+		eventProcessor: eventProcessor,
+	}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationFetch) Name() string {
+	return "operation-fetch"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationFetch) Check() (bool, error) {
+	downloadConfig := o.operatorConfig.DownloadConfig
+	fullPath, err := artifact.GetArtifactPath(o.program.BinaryName(), o.program.Version(), downloadConfig.OS(), downloadConfig.Arch(), downloadConfig.TargetDirectory)
+	if err != nil {
+		return false, err
+	}
+
+	_, err = os.Stat(fullPath)
+	if os.IsNotExist(err) {
+		return true, nil
+	}
+
+	o.logger.Infof("%s.%s already exists in %s. Skipping operation %s", o.program.BinaryName(), o.program.Version(), fullPath, o.Name())
+	return false, err
+}
+
+// Run runs the operation
+func (o *operationFetch) Run(ctx context.Context, application Application) (err error) {
+	defer func() {
+		if err != nil {
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+		}
+	}()
+
+	fullPath, err := o.downloader.Download(ctx, o.program.BinaryName(), o.program.Version())
+	if err == nil {
+		o.logger.Infof("operation '%s' downloaded %s.%s into %s", o.Name(), o.program.BinaryName(), o.program.Version(), fullPath)
+	}
+
+	return err
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_install.go b/x-pack/agent/pkg/agent/operation/operation_install.go
new file mode 100644
index 00000000000..6f1ac2b12e4
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_install.go
@@ -0,0 +1,71 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+	"os"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/install"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+// operationInstall installs an artifact from a predefined location;
+// skipped if the artifact is already installed.
+type operationInstall struct {
+	logger         *logger.Logger
+	program        Descriptor
+	operatorConfig *config.Config
+	installer      install.Installer
+	eventProcessor callbackHooks
+}
+
+func newOperationInstall(
+	logger *logger.Logger,
+	program Descriptor,
+	operatorConfig *config.Config,
+	installer install.Installer,
+	eventProcessor callbackHooks) *operationInstall {
+
+	return &operationInstall{
+		logger:         logger,
+		program:        program,
+		operatorConfig: operatorConfig,
+		installer:      installer,
+		eventProcessor: eventProcessor,
+	}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationInstall) Name() string {
+	return "operation-install"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationInstall) Check() (bool, error) {
+	installDir := o.program.Directory()
+	_, err := os.Stat(installDir)
+	return os.IsNotExist(err), nil
+}
+
+// Run runs the operation
+func (o *operationInstall) Run(ctx context.Context, application Application) (err error) {
+	defer func() {
+		if err != nil {
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+		}
+	}()
+
+	return o.installer.Install(o.program.BinaryName(), o.program.Version(), o.program.Directory())
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_remove.go b/x-pack/agent/pkg/agent/operation/operation_remove.go
new file mode 100644
index 00000000000..4d93d585095
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_remove.go
@@ -0,0 +1,48 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// operationRemove uninstalls and removes all the pieces related to the artifact.
+type operationRemove struct {
+	eventProcessor callbackHooks
+}
+
+func newOperationRemove(eventProcessor callbackHooks) *operationRemove {
+	return &operationRemove{eventProcessor: eventProcessor}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationRemove) Name() string {
+	return "operation-remove"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationRemove) Check() (bool, error) {
+	return false, nil
+}
+
+// Run runs the operation
+func (o *operationRemove) Run(ctx context.Context, application Application) (err error) {
+	defer func() {
+		if err != nil {
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+		}
+	}()
+
+	return nil
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_start.go b/x-pack/agent/pkg/agent/operation/operation_start.go
new file mode 100644
index 00000000000..f09c5dc85e3
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_start.go
@@ -0,0 +1,76 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process"
+)
+
+// operationStart starts an installed process;
+// skipped if the process is already running.
+type operationStart struct {
+	program        app.Descriptor
+	logger         *logger.Logger
+	operatorConfig *config.Config
+	cfg            map[string]interface{}
+	eventProcessor callbackHooks
+
+	pi *process.Info
+}
+
+func newOperationStart(
+	logger *logger.Logger,
+	operatorConfig *config.Config,
+	cfg map[string]interface{},
+	eventProcessor callbackHooks) *operationStart {
+	// TODO: make configurable
+
+	return &operationStart{
+		logger:         logger,
+		operatorConfig: operatorConfig,
+		cfg:            cfg,
+		eventProcessor: eventProcessor,
+	}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationStart) Name() string {
+	return "operation-start"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationStart) Check() (bool, error) {
+	// TODO: get running processes and compare hashes
+
+	return true, nil
+}
+
+// Run runs the operation
+func (o *operationStart) Run(ctx context.Context, application Application) (err error) {
+	o.eventProcessor.OnStarting(ctx, application.Name())
+	defer func() {
+		if err != nil {
+			// kill the process if something failed
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+		} else {
+			o.eventProcessor.OnRunning(ctx, application.Name())
+		}
+	}()
+
+	return application.Start(ctx, o.cfg)
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_stop.go b/x-pack/agent/pkg/agent/operation/operation_stop.go
new file mode 100644
index 00000000000..de7809612c0
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_stop.go
@@ -0,0 +1,64 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+// operationStop stops a running process;
+// skipped if the process is already stopped.
+type operationStop struct {
+	logger         *logger.Logger
+	operatorConfig *config.Config
+	eventProcessor callbackHooks
+}
+
+func newOperationStop(
+	logger *logger.Logger,
+	operatorConfig *config.Config,
+	eventProcessor callbackHooks) *operationStop {
+	return &operationStop{
+		logger:         logger,
+		operatorConfig: operatorConfig,
+		eventProcessor: eventProcessor,
+	}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationStop) Name() string {
+	return "operation-stop"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationStop) Check() (bool, error) {
+	return true, nil
+}
+
+// Run runs the operation
+func (o *operationStop) Run(ctx context.Context, application Application) (err error) {
+	o.eventProcessor.OnStopping(ctx, application.Name())
+	defer func() {
+		if err != nil {
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+		} else {
+			o.eventProcessor.OnStopped(ctx, application.Name())
+		}
+	}()
+
+	application.Stop()
+	return nil
+}
diff --git a/x-pack/agent/pkg/agent/operation/operation_verify.go b/x-pack/agent/pkg/agent/operation/operation_verify.go
new file mode 100644
index 00000000000..b2ab818eb7d
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operation_verify.go
@@ -0,0 +1,49 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// operationVerify verifies a downloaded artifact's signature.
+// Currently a stub: Check always reports that the operation does not need to run.
+type operationVerify struct {
+	eventProcessor callbackHooks
+}
+
+func newOperationVerify(eventProcessor callbackHooks) *operationVerify {
+	return &operationVerify{eventProcessor: eventProcessor}
+}
+
+// Name is a human-readable name identifying the operation.
+func (o *operationVerify) Name() string {
+	return "operation-verify"
+}
+
+// Check checks whether the operation needs to be run.
+// examples:
+// - Start does not need to run if the process is running
+// - Fetch does not need to run if the package is already present
+func (o *operationVerify) Check() (bool, error) {
+	return false, nil
+}
+
+// Run runs the operation
+func (o *operationVerify) Run(ctx context.Context, application Application) (err error) {
+	defer func() {
+		if err != nil {
+			err = errors.New(err,
+				o.Name(),
+				errors.TypeApplication,
+				errors.M(errors.MetaKeyAppName, application.Name()))
+			o.eventProcessor.OnFailing(ctx, application.Name(), err)
+		}
+	}()
+
+	return nil
+}
diff --git a/x-pack/agent/pkg/agent/operation/operator.go b/x-pack/agent/pkg/agent/operation/operator.go
new file mode 100644
index 00000000000..33cbea4fd15
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operator.go
@@ -0,0 +1,275 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	operatorCfg "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/stateresolver"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/install"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/retry"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/state"
+	rconfig "github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig/grpc"
+)
+
+// Operator runs Start/Stop/Update operations.
+// It is responsible for detecting and reconnecting to already-running processes
+// based on a backed-up configuration, and it enables running sidecars for processes.
+// TODO: implement retry strategies
+type Operator struct {
+	bgContext      context.Context
+	pipelineID     string
+	logger         *logger.Logger
+	config         *operatorCfg.Config
+	handlers       map[string]handleFunc
+	stateResolver  *stateresolver.StateResolver
+	eventProcessor callbackHooks
+	isMonitoring   bool
+
+	apps     map[string]Application
+	appsLock sync.Mutex
+
+	downloader download.Downloader
+	installer  install.Installer
+}
+
+// NewOperator creates a new operator that holds a collection of running
+// processes and backs it up. Based on the backed-up collection it prepares
+// clients, watchers, and so on during initialization.
+func NewOperator(
+	ctx context.Context,
+	logger *logger.Logger,
+	pipelineID string,
+	config *config.Config,
+	fetcher download.Downloader,
+	installer install.Installer,
+	stateResolver *stateresolver.StateResolver,
+	eventProcessor callbackHooks) (*Operator, error) {
+
+	operatorConfig := defaultOperatorConfig()
+	if err := config.Unpack(&operatorConfig); err != nil {
+		return nil, err
+	}
+
+	if operatorConfig.DownloadConfig == nil {
+		return nil, fmt.Errorf("artifacts configuration not provided")
+	}
+
+	if eventProcessor == nil {
+		eventProcessor = &noopCallbackHooks{}
+	}
+
+	operator := &Operator{
+		bgContext:      ctx,
+		config:         operatorConfig,
+		pipelineID:     pipelineID,
+		logger:         logger,
+		downloader:     fetcher,
+		installer:      installer,
+		stateResolver:  stateResolver,
+		apps:           make(map[string]Application),
+		eventProcessor: eventProcessor,
+	}
+
+	operator.initHandlerMap()
+
+	if err := os.MkdirAll(operatorConfig.DownloadConfig.TargetDirectory, 0755); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(operatorConfig.DownloadConfig.InstallPath, 0755); err != nil {
+		return nil, err
+	}
+
+	return operator, nil
+}
+
+func defaultOperatorConfig() *operatorCfg.Config {
+	return &operatorCfg.Config{
+		MonitoringConfig: &monitoring.Config{
+			MonitorLogs:    false,
+			MonitorMetrics: false,
+		},
+		RetryConfig: &retry.Config{
+			Enabled:      false,
+			RetriesCount: 0,
+			Delay:        30 * time.Second,
+			MaxDelay:     5 * time.Minute,
+			Exponential:  false,
+		},
+	}
+}
+
+// State describes the current state of the system.
+// It reports all known applications and their states: whether they are
+// running or not and, if running, information about the process.
+func (o *Operator) State() map[string]state.State {
+	result := make(map[string]state.State)
+
+	o.appsLock.Lock()
+	defer o.appsLock.Unlock()
+
+	for k, v := range o.apps {
+		result[k] = v.State()
+	}
+
+	return result
+}
+
+// HandleConfig handles configuration for a pipeline and performs actions to achieve this configuration.
+func (o *Operator) HandleConfig(cfg configrequest.Request) error {
+	_, steps, ack, err := o.stateResolver.Resolve(cfg)
+	if err != nil {
+		return errors.New(err, errors.TypeConfig, fmt.Sprintf("operator: failed to resolve configuration %s, error: %v", cfg, err))
+	}
+
+	for _, step := range steps {
+		if !strings.EqualFold(step.Process, monitoringName) {
+			if _, isSupported := program.SupportedMap[strings.ToLower(step.Process)]; !isSupported {
+				return errors.New(fmt.Sprintf("program '%s' is not supported", step.Process),
+					errors.TypeApplication,
+					errors.M(errors.MetaKeyAppName, step.Process))
+			}
+		}
+
+		handler, found := o.handlers[step.ID]
+		if !found {
+			return errors.New(fmt.Sprintf("operator: received unexpected event '%s'", step.ID), errors.TypeConfig)
+		}
+
+		if err := handler(step); err != nil {
+			return errors.New(err, errors.TypeConfig, fmt.Sprintf("operator: failed to execute step %s, error: %v", step.ID, err))
+		}
+	}
+
+	// Ack the resolver so the resolved state is stored for the next call.
+
+// start starts a new process based on a configuration; the specific
+// configuration for the new process is passed in.
+func (o *Operator) start(p Descriptor, cfg map[string]interface{}) (err error) {
+	flow := []operation{
+		newOperationFetch(o.logger, p, o.config, o.downloader, o.eventProcessor),
+		newOperationVerify(o.eventProcessor),
+		newOperationInstall(o.logger, p, o.config, o.installer, o.eventProcessor),
+		newOperationStart(o.logger, o.config, cfg, o.eventProcessor),
+		newOperationConfig(o.logger, o.config, cfg, o.eventProcessor),
+	}
+	return o.runFlow(p, flow)
+}
+
+// stop stops the running process; if the process is already stopped it does
+// not return an error.
+func (o *Operator) stop(p Descriptor) (err error) {
+	flow := []operation{
+		newOperationStop(o.logger, o.config, o.eventProcessor),
+	}
+
+	return o.runFlow(p, flow)
+}
+
+// pushConfig tries to push a configuration to a running process.
+func (o *Operator) pushConfig(p Descriptor, cfg map[string]interface{}) error {
+	var flow []operation
+	configurable := p.IsGrpcConfigurable()
+
+	if configurable {
+		flow = []operation{
+			newOperationConfig(o.logger, o.config, cfg, o.eventProcessor),
+		}
+	} else {
+		flow = []operation{
+			// updates a configuration file and restarts a process
+			newOperationStop(o.logger, o.config, o.eventProcessor),
+			newOperationStart(o.logger, o.config, cfg, o.eventProcessor),
+		}
+	}
+
+	return o.runFlow(p, flow)
+}
+
+func (o *Operator) runFlow(p Descriptor, operations []operation) error {
+	if len(operations) == 0 {
+		o.logger.Infof("operator received event with no operations for program '%s'", p.ID())
+		return nil
+	}
+
+	app, err := o.getApp(p)
+	if err != nil {
+		return err
+	}
+
+	for _, op := range operations {
+		shouldRun, err := op.Check()
+		if err != nil {
+			return err
+		}
+
+		if !shouldRun {
+			o.logger.Infof("operation '%s' skipped for %s.%s", op.Name(), p.BinaryName(), p.Version())
+			continue
+		}
+
+		if err := op.Run(o.bgContext, app); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (o *Operator) getApp(p Descriptor) (Application, error) {
+	o.appsLock.Lock()
+	defer o.appsLock.Unlock()
+
+	id := p.ID()
+
+	if a, ok := o.apps[id]; ok {
+		return a, nil
+	}
+
+	factory := rconfig.NewConnFactory(o.config.RetryConfig.Delay, o.config.RetryConfig.MaxDelay)
+
+	specifier, ok := p.(app.Specifier)
+	if !ok {
+		return nil, fmt.Errorf("descriptor is not an app.Specifier")
+	}
+
+	monitor := monitoring.NewMonitor(isMonitorable(p), p.BinaryName(), o.pipelineID, o.config.DownloadConfig, o.config.MonitoringConfig.MonitorLogs, o.config.MonitoringConfig.MonitorMetrics)
+
+	a, err := app.NewApplication(p.ID(), p.BinaryName(), o.pipelineID, specifier, factory, o.config, o.logger, o.eventProcessor.OnFailing, monitor)
+	if err != nil {
+		return nil, err
+	}
+
+	o.apps[id] = a
+	return a, nil
+}
+
+func isMonitorable(descriptor Descriptor) bool {
+	type taggable interface {
+		Tags() map[app.Tag]string
+	}
+
+	if taggable, ok := descriptor.(taggable); ok {
+		tags := taggable.Tags()
+		_, isSidecar := tags[app.TagSidecar]
+		return !isSidecar // everything is monitorable except sidecars
+	}
+
+	return false
+}
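+// Editorial note, not part of the original change: monitoring is skipped only
+// for descriptors tagged as sidecars; the tag value below is hypothetical.
+//
+//	tags := map[app.Tag]string{app.TagSidecar: "true"}
+//	desc := app.NewDescriptor("filebeat", "7.6.0", artifactCfg, tags)
+//	isMonitorable(desc) // false for the sidecar; true without the tag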
diff --git a/x-pack/agent/pkg/agent/operation/operator_handlers.go b/x-pack/agent/pkg/agent/operation/operator_handlers.go
new file mode 100644
index 00000000000..2d583f8a12d
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operator_handlers.go
@@ -0,0 +1,95 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package operation
+
+import (
+	"fmt"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app"
+)
+
+type handleFunc func(step configrequest.Step) error
+
+func (o *Operator) initHandlerMap() {
+	hm := make(map[string]handleFunc)
+
+	hm[configrequest.StepRun] = o.handleRun
+	hm[configrequest.StepRemove] = o.handleRemove
+
+	o.handlers = hm
+}
+
+func (o *Operator) handleRun(step configrequest.Step) error {
+	if step.Process == monitoringName {
+		return o.handleStartSidecar(step)
+	}
+
+	p, cfg, err := getProgramFromStep(step, o.config.DownloadConfig)
+	if err != nil {
+		return errors.New(err,
+			"operator.handleRun failed to create program",
+			errors.TypeApplication,
+			errors.M(errors.MetaKeyAppName, step.Process))
+	}
+
+	return o.start(p, cfg)
+}
+
+func (o *Operator) handleRemove(step configrequest.Step) error {
+	if step.Process == monitoringName {
+		return o.handleStopSidecar(step)
+	}
+
+	p, _, err := getProgramFromStep(step, o.config.DownloadConfig)
+	if err != nil {
+		return errors.New(err,
+			"operator.handleRemove failed to stop program",
+			errors.TypeApplication,
+			errors.M(errors.MetaKeyAppName, step.Process))
+	}
+
+	return o.stop(p)
+}
+
+func getProgramFromStep(step configrequest.Step, artifactConfig *artifact.Config) (Descriptor, map[string]interface{}, error) {
+	return getProgramFromStepWithTags(step, artifactConfig, nil)
+}
+
+func getProgramFromStepWithTags(step configrequest.Step, artifactConfig *artifact.Config, tags map[app.Tag]string) (Descriptor, map[string]interface{}, error) {
+	config, err := getConfigFromStep(step)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := app.NewDescriptor(step.Process, step.Version, artifactConfig, tags)
+	return p, config, nil
+}
+
+func getConfigFromStep(step configrequest.Step) (map[string]interface{}, error) {
+	metConfig, hasConfig := step.Meta[configrequest.MetaConfigKey]
+
+	if !hasConfig && needsMetaConfig(step) {
+		return nil, fmt.Errorf("step: %s, no config in metadata", step.ID)
+	}
+
+	var config map[string]interface{}
+	if hasConfig {
+		var ok bool
+		config, ok = metConfig.(map[string]interface{})
+		if !ok {
+			return nil, errors.New(errors.TypeConfig,
+				fmt.Sprintf("step: %s, program config is in an invalid format", step.ID))
+		}
+	}
+
+	return config, nil
+}
+
+func needsMetaConfig(step configrequest.Step) bool {
+	return step.ID == configrequest.StepRun
+}
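+// Editorial sketch, not part of the original change: the shape of a "run"
+// step as consumed by handleRun above; all field values are hypothetical.
+//
+//	step := configrequest.Step{
+//		ID:      configrequest.StepRun,
+//		Process: "filebeat",
+//		Version: "7.6.0",
+//		Meta: map[string]interface{}{
+//			configrequest.MetaConfigKey: map[string]interface{}{ /* program config */ },
+//		},
+//	}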
diff --git a/x-pack/agent/pkg/agent/operation/operator_test.go b/x-pack/agent/pkg/agent/operation/operator_test.go
new file mode 100644
index 00000000000..60825009874
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/operator_test.go
@@ -0,0 +1,454 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build linux darwin
+
+package operation
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	operatorCfg "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/operation/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/stateresolver"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/install"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/retry"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/state"
+)
+
+var installPath = "tests/scripts"
+
+func TestMain(m *testing.M) {
+	// register the supported programs used by the test cases
+	shortSpec := program.Spec{
+		Name:         "short",
+		Cmd:          "/bin/echo",
+		Configurable: "file",
+		Args:         []string{"123"},
+	}
+	longSpec := program.Spec{
+		Name:         "long",
+		Cmd:          "/bin/sh",
+		Configurable: "file",
+		Args:         []string{"-c", "echo 123; sleep 100"},
+	}
+	configurableSpec := program.Spec{
+		Name:         "configurable",
+		Cmd:          "configurable",
+		Configurable: "file",
+		Args:         []string{},
+	}
+	configByFileSpec := program.Spec{
+		Name:         "configurablebyfile",
+		Cmd:          "configurablebyfile",
+		Configurable: "file",
+		Args:         []string{},
+	}
+
+	program.Supported = append(program.Supported, shortSpec, longSpec, configurableSpec, configByFileSpec)
+
+	// without this call the test binary would exit without running any test
+	os.Exit(m.Run())
+}
+
+func TestNotSupported(t *testing.T) {
+	p := getProgram("notsupported", "1.0")
+
+	operator, _ := getTestOperator(t, "tests/scripts")
+	err := operator.start(p, nil)
+	if err == nil {
+		t.Fatal("was expecting error but got none")
+	}
+}
+
+func TestShortRun(t *testing.T) {
+	p := getProgram("short", "1.0")
+
+	operator, _ := getTestOperator(t, "tests/scripts")
+	if err := operator.start(p, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// let the watcher kick in
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	if len(items) == 1 && items[p.ID()].Status == state.Running {
+		t.Fatalf("Process reattach info not stopped %#v, %+v", items, items[p.ID()].Status)
+	}
+
+	os.Remove(filepath.Join(operator.config.DownloadConfig.InstallPath, "short--1.0.yml"))
+}
+
+func TestShortRunInvalid(t *testing.T) {
+	p := getProgram("bumblebee", "")
+	operator, _ := getTestOperator(t, "/bin")
+	if err := operator.start(p, nil); err == nil {
+		t.Fatal("was expecting error but got none")
+	}
+
+	// let the watcher kick in
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	if len(items) == 1 && items[p.ID()].Status == state.Running {
+		t.Fatalf("Process reattach info not stopped %#v, %+v", items, items[p.ID()].Status)
+	}
+}
+
+func TestLongRunWithStop(t *testing.T) {
+	p := getProgram("long", "1.0")
+
+	operator, _ := getTestOperator(t, "tests/scripts")
+	if err := operator.start(p, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// wait for the watcher so we know it was not cancelled immediately
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	item0, ok := items[p.ID()]
+	if !ok || item0.Status != state.Running {
+		t.Fatalf("Process not running %#v", items)
+	}
+
+	pid := item0.ProcessInfo.PID
+
+	// stop the process
+	if err := operator.stop(p); err != nil {
+		t.Fatalf("Failed to stop process with PID %d: %v", pid, err)
+	}
+
+	// let the watcher kick in
+	<-time.After(1 * time.Second)
+
+	// check state updated
+	items = operator.State()
+	item1, ok := items[p.ID()]
+	if !ok || item1.Status == state.Running {
+		t.Fatalf("Process state says running after Stop %#v", items)
+	}
+
+	// check process stopped
+	proc, err := os.FindProcess(pid)
+	if err != nil && proc != nil {
+		t.Fatal("Process found")
+	}
+}
+
+func TestLongRunWithCrash(t *testing.T) {
+	p := getProgram("long", "1.0")
+
+	operator, _ := getTestOperator(t, "tests/scripts")
+	if err := operator.start(p, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// wait for the watcher so we know it was not cancelled immediately
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	item0, ok := items[p.ID()]
+	if !ok || item0.Status != state.Running {
+		t.Fatalf("Process not running %#v", items)
+	}
+
+	// crash the process
+	pid := item0.ProcessInfo.PID
+	proc, err := os.FindProcess(pid)
+	if err != nil {
+		t.Fatalf("Failed to get process with PID %d: %v", pid, err)
+	}
+	if err := proc.Kill(); err != nil {
+		t.Fatalf("Failed to kill process with PID %d: %v", pid, err)
+	}
+
+	// let the watcher kick in
+	<-time.After(3 * time.Second)
+
+	// check process restarted
+	items = operator.State()
+	item1, ok := items[p.ID()]
+	if !ok || item1.Status != state.Running {
+		t.Fatalf("Process not present after restart %#v", items)
+	}
+
+	newPid := item1.ProcessInfo.PID
+	if pid == newPid {
+		t.Fatalf("Process not restarted, still with the same PID %d", pid)
+	}
+
+	// stop restarted process
+	if err := operator.stop(p); err != nil {
+		t.Fatalf("Failed to stop restarted process %d: %v", newPid, err)
+	}
+}
+
+func TestTwoProcesses(t *testing.T) {
+	p := getProgram("long", "1.0")
+
+	operator, _ := getTestOperator(t, "tests/scripts")
+	if err := operator.start(p, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// wait for the watcher so we know it was not cancelled immediately
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	item0, ok := items[p.ID()]
+	if !ok || item0.Status != state.Running {
+		t.Fatalf("Process not running %#v", items)
+	}
+
+	// start the same process again
+	if err := operator.start(p, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// let the watcher kick in
+	<-time.After(1 * time.Second)
+
+	items = operator.State()
+	item1, ok := items[p.ID()]
+	if !ok || item1.Status != state.Running {
+		t.Fatalf("Process not running %#v", items)
+	}
+
+	if item0.ProcessInfo.PID != item1.ProcessInfo.PID {
+		t.Fatal("Process got updated, expected the same")
+	}
+
+	// stop the process
+	operator.stop(p)
+}
+
+func TestConfigurableRun(t *testing.T) {
+	p := getProgram("configurable", "1.0")
+
+	spec := p.Spec()
+	if s, err := os.Stat(spec.BinaryPath); err != nil || s == nil {
+		t.Fatalf("binary not available %s", spec.BinaryPath)
+	} else {
+		t.Logf("found file %v", spec.BinaryPath)
+	}
+
+	operator, _ := getTestOperator(t, installPath)
+	if err := operator.start(p, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// wait for the watcher so we know it was not cancelled immediately
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	item0, ok := items[p.ID()]
+	if !ok || item0.Status != state.Running {
+		t.Fatalf("Process not running %#v", items)
+	}
+
+	pid := item0.ProcessInfo.PID
+
+	// check it is still running
+	<-time.After(2 * time.Second)
+
+	items = operator.State()
+	item1, ok := items[p.ID()]
+	if !ok || item1.Status != state.Running {
+		t.Fatalf("Process stopped running %#v", items)
+	}
+
+	newPID := item1.ProcessInfo.PID
+	if pid != newPID {
+		t.Fatalf("Process crashed in between first pid: '%v' second pid: '%v'", pid, newPID)
+	}
+
+	// try to configure
+	cfg := make(map[string]interface{})
+	tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32()))
+	cfg["TestFile"] = tstFilePath
+	if err := operator.pushConfig(p, cfg); err != nil {
+		t.Fatalf("failed to config: %v", err)
+	}
+
+	if s, err := os.Stat(tstFilePath); err != nil || s == nil {
+		t.Fatalf("failed to create a file using Config call %s", tstFilePath)
+	}
+
+	// stop the process
+	if err := operator.stop(p); err != nil {
+		t.Fatalf("Failed to stop process with PID %d: %v", pid, err)
+	}
+
+	// let the watcher kick in
+	<-time.After(1 * time.Second)
+
+	// check the process is no longer reported as running
+	items = operator.State()
+	item2, ok := items[p.ID()]
+	if !ok || item2.Status == state.Running {
+		t.Fatalf("Process still running after stop %#v", items)
+	}
+
+	// check process stopped
+	proc, err := os.FindProcess(pid)
+	if err != nil && proc != nil {
+		t.Fatal("Process found")
+	}
+}
+
+func TestConfigurableByFileRun(t *testing.T) {
+	cfg := make(map[string]interface{})
+	cfg["TestFile"] = "tstFilePath"
+	downloadCfg := &artifact.Config{
+		InstallPath:     installPath,
+		OperatingSystem: "darwin",
+	}
+
+	p := app.NewDescriptor("configurablebyfile", "1.0", downloadCfg, nil)
+	spec := p.Spec()
+	if s, err := os.Stat(spec.BinaryPath); err != nil || s == nil {
+		t.Fatalf("binary not available %s", spec.BinaryPath)
+	} else {
+		t.Logf("found file %v", spec.BinaryPath)
+	}
+
+	operator, _ := getTestOperator(t, installPath)
+	if err := operator.start(p, cfg); err != nil {
+		t.Fatal(err)
+	}
+
+	// wait for the watcher so we know it was not cancelled immediately
+	<-time.After(1 * time.Second)
+
+	items := operator.State()
+	item0, ok := items[p.ID()]
+	if !ok || item0.Status != state.Running {
+		t.Fatalf("Process not running %#v", items)
+	}
+
+	// check it is still running
+	<-time.After(2 * time.Second)
+
+	items = operator.State()
+	item1, ok := items[p.ID()]
+	if !ok || item1.Status != state.Running {
+		t.Fatalf("Process not running anymore %#v", items)
+	}
+
+	if item0.ProcessInfo.PID != item1.ProcessInfo.PID {
+		t.Fatalf("Process crashed in between first pid: '%v' second pid: '%v'", item0.ProcessInfo.PID, item1.ProcessInfo.PID)
+	}
+
+	// stop the process
+	if err := operator.stop(p); err != nil {
+		t.Fatalf("Failed to stop process with PID %d: %v", item1.ProcessInfo.PID, err)
+	}
+
+	// let the watcher kick in
+	<-time.After(1 * time.Second)
+
+	// check the process is no longer reported as running
+	items = operator.State()
+	item2, ok := items[p.ID()]
+	if !ok || item2.Status == state.Running {
+		t.Fatalf("Process still running after stop %#v", items)
+	}
+
+	// check process stopped
+	proc, err := os.FindProcess(item1.ProcessInfo.PID)
+	if err != nil && proc != nil {
+		t.Fatal("Process found")
+	}
+}
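+// waitFor is an editorial sketch, not part of the original change: a polling
+// helper that the fixed one-second sleeps in the tests above could use to
+// reduce flakiness; the 100ms poll interval is arbitrary.
+func waitFor(timeout time.Duration, cond func() bool) bool {
+	deadline := time.Now().Add(timeout)
+	for time.Now().Before(deadline) {
+		if cond() {
+			return true
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	return false
+}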
+
+func getTestOperator(t *testing.T, installPath string) (*Operator, *operatorCfg.Config) {
+	operatorConfig := &operatorCfg.Config{
+		RetryConfig: &retry.Config{
+			Enabled:      true,
+			RetriesCount: 2,
+			Delay:        3 * time.Second,
+			MaxDelay:     10 * time.Second,
+		},
+		ProcessConfig: &process.Config{},
+		DownloadConfig: &artifact.Config{
+			InstallPath: installPath,
+		},
+		MonitoringConfig: &monitoring.Config{
+			MonitorMetrics: false,
+		},
+	}
+
+	cfg, err := config.NewConfigFrom(operatorConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	l := getLogger()
+
+	fetcher := &DummyDownloader{}
+	installer := &DummyInstaller{}
+
+	stateResolver, err := stateresolver.NewStateResolver(l)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operator, err := NewOperator(context.Background(), l, "p1", cfg, fetcher, installer, stateResolver, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	operator.config.DownloadConfig.OperatingSystem = "darwin"
+	operator.config.DownloadConfig.Architecture = "32"
+
+	return operator, operatorConfig
+}
+
+func getLogger() *logger.Logger {
+	l, _ := logger.New()
+	return l
+}
+
+func getProgram(binary, version string) *app.Descriptor {
+	downloadCfg := &artifact.Config{
+		InstallPath:     installPath,
+		OperatingSystem: "darwin",
+	}
+	return app.NewDescriptor(binary, version, downloadCfg, nil)
+}
+
+type TestConfig struct {
+	TestFile string
+}
+
+type DummyDownloader struct {
+}
+
+func (*DummyDownloader) Download(_ context.Context, p, v string) (string, error) {
+	return "", nil
+}
+
+var _ download.Downloader = &DummyDownloader{}
+
+type DummyInstaller struct {
+}
+
+func (*DummyInstaller) Install(p, v, _ string) error {
+	return nil
+}
+
+var _ install.Installer = &DummyInstaller{}
diff --git a/x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/README.md b/x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/README.md
new file mode 100644
index 00000000000..71e80463f7c
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/README.md
@@ -0,0 +1 @@
+Testing program emulating a tool that is configurable over a gRPC communication channel.
diff --git a/x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/main.go b/x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/main.go
new file mode 100644
index 00000000000..78bfeaf2f9d
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/main.go
@@ -0,0 +1,63 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package main
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/server"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig/grpc"
+)
+
+func main() {
+	f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
+	f.WriteString("starting \n")
+	s := &configServer{}
+	if err := server.NewGrpcServer(os.Stdin, s); err != nil {
+		f.WriteString(err.Error())
+		panic(err)
+	}
+	f.WriteString("finished \n")
+}
+
+type configServer struct {
+}
+
+// TestConfig is a configuration for testing Config calls
+type TestConfig struct {
+	TestFile string `config:"TestFile" yaml:"TestFile"`
+}
+
+func (*configServer) Config(ctx context.Context, req *grpc.ConfigRequest) (*grpc.ConfigResponse, error) {
+	cfgString := req.GetConfig()
+
+	testCfg := &TestConfig{}
+	if err := yaml.Unmarshal([]byte(cfgString), &testCfg); err != nil {
+		return &grpc.ConfigResponse{}, err
+	}
+
+	if testCfg.TestFile != "" {
+		tf, err := os.Create(testCfg.TestFile)
+		if err != nil {
+			return &grpc.ConfigResponse{}, err
+		}
+
+		err = tf.Close()
+		if err != nil {
+			return &grpc.ConfigResponse{}, err
+		}
+	}
+
+	return &grpc.ConfigResponse{}, nil
+}
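+// Editorial note, not part of the original change: the Config request payload
+// is YAML; a non-empty TestFile value makes this server create that file,
+// which the operator tests then assert on. Example payload (path is
+// illustrative):
+//
+//	TestFile: /tmp/example-output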
+
+// Status returns ok.
+func (*configServer) Status(ctx context.Context, req *grpc.StatusRequest) (*grpc.StatusResponse, error) {
+	return &grpc.StatusResponse{Status: "ok"}, nil
+}
diff --git a/x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/README.md b/x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/README.md
new file mode 100644
index 00000000000..2acff814a29
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/README.md
@@ -0,0 +1,2 @@
+Testing program emulating a tool that is configured by file.
+The path to the configuration is passed using the '-c' parameter.
diff --git a/x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/main.go b/x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/main.go
new file mode 100644
index 00000000000..de5eedc00ec
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/tests/scripts/configurablebyfile-1.0-darwin-x86/main.go
@@ -0,0 +1,52 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package main
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+
+	"gopkg.in/yaml.v2"
+)
+
+func main() {
+	f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
+	f.WriteString("starting \n")
+	if os.Args[1] != "-c" {
+		panic(fmt.Errorf("configuration not provided %#v", os.Args))
+	}
+
+	if len(os.Args) == 2 {
+		panic(errors.New("configuration path not provided"))
+	}
+
+	cfgPath := os.Args[2]
+	contentBytes, err := ioutil.ReadFile(cfgPath)
+	if err != nil {
+		panic(err)
+	}
+
+	testCfg := &TestConfig{}
+	if err := yaml.Unmarshal(contentBytes, &testCfg); err != nil {
+		panic(err)
+	}
+
+	if testCfg.TestFile == "" {
+		panic(errors.New("'TestFile' key not found in config"))
+	}
+
+	<-time.After(90 * time.Second)
+
+	f.WriteString("finished \n")
+}
+
+// TestConfig is a configuration for testing Config calls
+type TestConfig struct {
+	TestFile string
+}
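+// Editorial note, not part of the original change: this fixture is started as
+//
+//	configurablebyfile -c /path/to/config.yml
+//
+// and the YAML file must contain a non-empty TestFile key, e.g.
+// "TestFile: /tmp/example"; the paths are illustrative.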
diff --git a/x-pack/agent/pkg/agent/operation/tests/scripts/long-1.0-darwin-x86/README.md b/x-pack/agent/pkg/agent/operation/tests/scripts/long-1.0-darwin-x86/README.md
new file mode 100644
index 00000000000..8429c59805c
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/tests/scripts/long-1.0-darwin-x86/README.md
@@ -0,0 +1 @@
+Testing program emulating a tool that executes and completes with a delay, or is long-running.
diff --git a/x-pack/agent/pkg/agent/operation/tests/scripts/short-1.0-darwin-x86/README.md b/x-pack/agent/pkg/agent/operation/tests/scripts/short-1.0-darwin-x86/README.md
new file mode 100644
index 00000000000..0576a8b3c4e
--- /dev/null
+++ b/x-pack/agent/pkg/agent/operation/tests/scripts/short-1.0-darwin-x86/README.md
@@ -0,0 +1 @@
+Testing program emulating a tool that executes and completes immediately.
diff --git a/x-pack/agent/pkg/agent/program/methods.go b/x-pack/agent/pkg/agent/program/methods.go
new file mode 100644
index 00000000000..ce774144d86
--- /dev/null
+++ b/x-pack/agent/pkg/agent/program/methods.go
@@ -0,0 +1,142 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package program
+
+import (
+	"fmt"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/boolexp"
+)
+
+type env struct {
+	ast  *transpiler.AST
+	vars boolexp.VarStore
+}
+
+type envFunc = func(*env, []interface{}) (interface{}, error)
+
+func methodsEnv(ast *transpiler.AST) *boolexp.MethodsReg {
+	env := &env{
+		ast:  ast,
+		vars: &varStoreAST{ast: ast},
+	}
+
+	var methods = boolexp.NewMethodsReg()
+	methods.MustRegister("HasItems", withEnv(env, hasItems))
+	methods.MustRegister("HasNamespace", withEnv(env, hasNamespace))
+	return methods
+}
+
+// hasItems takes a selector which must be a list and looks for the presence of
+// items in the list which are "enabled". The logic to determine whether an
+// item is enabled is the following:
+// - When the "enabled" key is present and set to "true", the item is enabled.
+// - When the "enabled" key is missing, the item is enabled.
+// - When the "enabled" key is present and set to "false", the item is NOT enabled.
+func hasItems(_ *env, args []interface{}) (interface{}, error) {
+	if len(args) != 1 {
+		return false, fmt.Errorf("expecting 1 argument received %d", len(args))
+	}
+
+	if args[0] == boolexp.Null {
+		return false, nil
+	}
+
+	v, ok := args[0].(transpiler.Node).Value().(*transpiler.List)
+	if !ok {
+		return false, fmt.Errorf("expecting List and received %T", args[0])
+	}
+
+	for _, item := range v.Value().([]transpiler.Node) {
+		d, ok := item.(*transpiler.Dict)
+		if !ok {
+			return false, fmt.Errorf("expecting Dict and received %T", item)
+		}
+
+		if isEnabled(d) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// hasNamespace takes a selector which must be a map and checks whether the map
+// is enabled. The logic to determine whether a map is enabled is the following:
+// - When the "enabled" key is present and set to "true", the item is enabled.
+// - When the "enabled" key is missing, the item is enabled.
+// - When the "enabled" key is present and set to "false", the item is NOT enabled.
+func hasNamespace(env *env, args []interface{}) (interface{}, error) {
+	if len(args) < 2 {
+		return false, fmt.Errorf("expecting at least 2 arguments received %d", len(args))
+	}
+
+	namespace, ok := args[0].(string)
+	if !ok {
+		return false, fmt.Errorf("invalid namespace %+v", args[0])
+	}
+
+	possibleSubKey := make([]string, 0, len(args))
+
+	for _, v := range args[1:] {
+		sk, ok := v.(string)
+		if !ok {
+			return false, fmt.Errorf("invalid sub key %+v for namespace", v)
+		}
+		possibleSubKey = append(possibleSubKey, sk)
+	}
+
+	var enabledCount int
+	for _, key := range possibleSubKey {
+		f := namespace + "." + key
+		s, ok := transpiler.Lookup(env.ast, transpiler.Selector(f))
+		if !ok {
+			continue
+		}
+
+		if isEnabled(s) {
+			enabledCount++
+		}
+
+		if enabledCount > 1 {
+			return false, fmt.Errorf("only one namespace must be enabled in %s", namespace)
+		}
+	}
+
+	if enabledCount == 0 {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+func withEnv(env *env, method envFunc) boolexp.CallFunc {
+	return func(args []interface{}) (interface{}, error) {
+		return method(env, args)
+	}
+}
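+// Editorial illustration, not part of the original change: the three
+// "enabled" cases documented above, in the YAML form they take in a stream:
+//
+//	- type: log              # no "enabled" key -> enabled
+//	- type: log
+//	  enabled: true          # -> enabled
+//	- type: log
+//	  enabled: false         # -> NOT enabled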
+
+func isEnabled(n transpiler.Node) bool {
+	enabled, ok := n.Find("enabled")
+	if !ok {
+		return true
+	}
+
+	// Get the actual value of the node.
+	value, ok := enabled.Value().(transpiler.Node).Value().(bool)
+	if !ok {
+		return false
+	}
+
+	return value
+}
+
+type varStoreAST struct {
+	ast *transpiler.AST
+}
+
+func (v *varStoreAST) Lookup(needle string) (interface{}, bool) {
+	return transpiler.Lookup(v.ast, transpiler.Selector(needle))
+}
diff --git a/x-pack/agent/pkg/agent/program/program.go b/x-pack/agent/pkg/agent/program/program.go
new file mode 100644
index 00000000000..0454b554029
--- /dev/null
+++ b/x-pack/agent/pkg/agent/program/program.go
@@ -0,0 +1,261 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package program
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/boolexp"
+)
+
+// Program represents a program that must be started or must run.
+type Program struct {
+	Spec   Spec
+	Config *transpiler.AST
+}
+
+// Cmd returns the execution command to run.
+func (p *Program) Cmd() string {
+	return p.Spec.Cmd
+}
+
+// Checksum returns the checksum of the current instance of the program.
+func (p *Program) Checksum() string {
+	return p.Config.HashStr()
+}
+
+// Identifier returns the Program unique identifier.
+func (p *Program) Identifier() string {
+	return strings.ToLower(p.Spec.Name)
+}
+
+// Configuration returns the program configuration in a map[string]interface{} format.
+func (p *Program) Configuration() map[string]interface{} {
+	m, err := p.Config.Map()
+	if err != nil {
+		// TODO: this should not panic; refactor so that no error can be
+		// returned at this stage.
+		panic(err)
+	}
+	return m
+}
+
+// Programs takes a tree representation of the main configuration, applies each
+// program's rules, and generates an individual configuration per program.
+func Programs(singleConfig *transpiler.AST) (map[string][]Program, error) {
+	grouped, err := groupByOutputs(singleConfig)
+	if err != nil {
+		return nil, errors.New(err, errors.TypeConfig, "fail to extract program configuration")
+	}
+
+	groupedPrograms := make(map[string][]Program)
+	for k, config := range grouped {
+		programs, err := detectPrograms(config)
+		if err != nil {
+			return nil, errors.New(err, errors.TypeConfig, "fail to generate program configuration")
+		}
+		groupedPrograms[k] = programs
+	}
+
+	return groupedPrograms, nil
+}
+
+func detectPrograms(singleConfig *transpiler.AST) ([]Program, error) {
+	programs := make([]Program, 0)
+	for _, spec := range Supported {
+		specificAST := singleConfig.Clone()
+		err := spec.Rules.Apply(specificAST)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(spec.When) == 0 {
+			return nil, ErrMissingWhen
+		}
+
+		expression, err := boolexp.New(spec.When, methodsEnv(specificAST))
+		if err != nil {
+			return nil, err
+		}
+
+		ok, err := expression.Eval(&varStoreAST{ast: specificAST})
+		if err != nil {
+			return nil, err
+		}
+
+		if !ok {
+			continue
+		}
+
+		program := Program{
+			Spec:   spec,
+			Config: specificAST,
+		}
+		programs = append(programs, program)
+	}
+	return programs, nil
+}
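+// Editorial note, not part of the original change: a spec is selected only
+// when its When expression evaluates to true against the rule-transformed
+// AST; HasItems and HasNamespace are the methods registered in methods.go,
+// and the expression below is a hypothetical example of such a gate.
+//
+//	when: HasItems('filebeat.inputs') && HasNamespace('output', 'elasticsearch')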
+
+// KnownProgramNames returns the names of all programs the agent can run.
+func KnownProgramNames() []string {
+	names := make([]string, len(Supported))
+	for idx, program := range Supported {
+		names[idx] = program.Name
+	}
+	return names
+}
+
+func groupByOutputs(single *transpiler.AST) (map[string]*transpiler.AST, error) {
+	const (
+		outputsKey = "outputs"
+		outputKey  = "output"
+		streamsKey = "streams"
+		typeKey    = "type"
+	)
+
+	if _, found := transpiler.Select(single, outputsKey); !found {
+		return nil, errors.New("invalid configuration missing outputs configuration")
+	}
+
+	// Normalize using an intermediate map.
+	normMap, err := single.Map()
+	if err != nil {
+		return nil, errors.New(err, "could not read configuration")
+	}
+
+	// Recreate multiple configurations grouped by the name of the output.
+	// Each configuration will be started in its own operator with the same name as the output.
+	grouped := make(map[string]map[string]interface{})
+
+	m, ok := normMap[outputsKey]
+	if !ok {
+		return nil, errors.New("failed to receive a list of configured outputs")
+	}
+
+	out, ok := m.(map[string]interface{})
+	if !ok {
+		return nil, errors.New(fmt.Errorf(
+			"invalid outputs configuration received, expecting a map not a %T",
+			m,
+		))
+	}
+
+	for k, v := range out {
+		outputsOptions, ok := v.(map[string]interface{})
+		if !ok {
+			return nil, errors.New("invalid type for output configuration block")
+		}
+
+		t, ok := outputsOptions[typeKey]
+		if !ok {
+			return nil, fmt.Errorf("missing output type for output named %s", k)
+		}
+
+		n, ok := t.(string)
+		if !ok {
+			return nil, fmt.Errorf("invalid type received %T and expecting a string", t)
+		}
+
+		delete(outputsOptions, typeKey)
+
+		// Propagate global configuration to each individual configuration.
+		clone := cloneMap(normMap)
+		delete(clone, outputsKey)
+		clone[outputKey] = map[string]interface{}{n: v}
+		clone[streamsKey] = make([]map[string]interface{}, 0)
+
+		grouped[k] = clone
+	}
+
+	s, ok := normMap[streamsKey]
+	if !ok {
+		s = make([]interface{}, 0)
+	}
+
+	list, ok := s.([]interface{})
+	if !ok {
+		return nil, errors.New("failed to receive a list of configured streams")
+	}
+
+	for _, item := range list {
+		stream, ok := item.(map[string]interface{})
+		if !ok {
+			return nil, fmt.Errorf(
+				"invalid type for stream expecting a map of options and received %T",
+				item,
+			)
+		}
+		targetName := findOutputName(stream)
+
+		// Fail to load the configuration when no output configuration exists
+		// for the stream's target output.
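+		// (Editorial example, not part of the original change: a stream that
+		// carries "use_output: infosec1" resolves here to grouped["infosec1"],
+		// while a stream without a "use_output" key falls back to
+		// grouped["default"], per findOutputName below.)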
+ config, ok := grouped[targetName] + if !ok { + return nil, fmt.Errorf("unknown configuration output with name %s", targetName) + } + + streams := config[streamsKey].([]map[string]interface{}) + streams = append(streams, stream) + + config[streamsKey] = streams + grouped[targetName] = config + } + + transpiled := make(map[string]*transpiler.AST) + + for name, group := range grouped { + if len(group[streamsKey].([]map[string]interface{})) == 0 { + continue + } + + ast, err := transpiler.NewAST(group) + if err != nil { + return nil, errors.New(err, "fail to generate configuration for output name %s", name) + } + + transpiled[name] = ast + } + + return transpiled, nil +} + +func findOutputName(m map[string]interface{}) string { + const ( + defaultOutputName = "default" + outputKey = "output" + useOutputKey = "use_output" + ) + + output, ok := m[outputKey] + if !ok { + return defaultOutputName + } + + o := output.(map[string]interface{}) + + name, ok := o[useOutputKey] + if !ok { + return defaultOutputName + } + + return name.(string) +} + +func cloneMap(m map[string]interface{}) map[string]interface{} { + newMap := make(map[string]interface{}) + for k, v := range m { + sV, ok := v.(map[string]interface{}) + if ok { + newMap[k] = cloneMap(sV) + continue + } + newMap[k] = v + } + + return newMap +} diff --git a/x-pack/agent/pkg/agent/program/program_test.go b/x-pack/agent/pkg/agent/program/program_test.go new file mode 100644 index 00000000000..293532cac45 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/program_test.go @@ -0,0 +1,608 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package program + +import ( + "io/ioutil" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + yaml "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/internal/yamltest" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler" +) + +func TestGroupBy(t *testing.T) { + t.Run("only named output", func(t *testing.T) { + sConfig := map[string]interface{}{ + "outputs": map[string]interface{}{ + "special": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + map[string]interface{}{ + "type": "metrics/system", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "infosec1", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + grouped, err := groupByOutputs(ast) + require.NoError(t, err) + require.Equal(t, 2, len(grouped)) + + c1 := transpiler.MustNewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + map[string]interface{}{ + "type": "metrics/system", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + }, + }) + + c2, _ := transpiler.NewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "infosec1", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + }) + + defaultConfig, ok := grouped["special"] + require.True(t, ok) + require.Equal(t, c1.Hash(), defaultConfig.Hash()) + + infosec1Config, ok := grouped["infosec1"] + + require.True(t, ok) + require.Equal(t, c2.Hash(), infosec1Config.Hash()) + }) + + t.Run("copy any top level configuration options to each configuration", func(t *testing.T) { + sConfig := map[string]interface{}{ + "monitoring": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "127.0.0.1", + }, + }, + "keystore": map[string]interface{}{ + "path": "${path.data}/keystore", + }, + "outputs": map[string]interface{}{ + "special": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + 
"type": "log", + "path": "/var/log/hello.log", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + map[string]interface{}{ + "type": "metrics/system", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "infosec1", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + grouped, err := groupByOutputs(ast) + require.NoError(t, err) + require.Equal(t, 2, len(grouped)) + + c1 := transpiler.MustNewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + map[string]interface{}{ + "type": "metrics/system", + "output": map[string]interface{}{ + "use_output": "special", + }, + }, + }, + "monitoring": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "127.0.0.1", + }, + }, + "keystore": map[string]interface{}{ + "path": "${path.data}/keystore", + }, + }) + + c2, _ := transpiler.NewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "infosec1", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + "monitoring": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "127.0.0.1", + }, + }, + "keystore": map[string]interface{}{ + "path": "${path.data}/keystore", + }, + }) + + defaultConfig, ok := grouped["special"] + require.True(t, ok) + require.Equal(t, c1.Hash(), defaultConfig.Hash()) + + infosec1Config, ok := grouped["infosec1"] + + require.True(t, ok) + require.Equal(t, c2.Hash(), infosec1Config.Hash()) + }) + + t.Run("fail when the referenced named output doesn't exist", func(t *testing.T) { + sConfig := map[string]interface{}{ + "monitoring": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "localhost", + }, + }, + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + }, + map[string]interface{}{ + "type": "metrics/system", + }, + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "donotexist", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + _, err = groupByOutputs(ast) + require.Error(t, err) + }) + + t.Run("only default output", func(t *testing.T) { + sConfig := map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": 
map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + }, + map[string]interface{}{ + "type": "metrics/system", + }, + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + }, + }, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + grouped, err := groupByOutputs(ast) + require.NoError(t, err) + require.Equal(t, 1, len(grouped)) + + c1 := transpiler.MustNewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + }, + map[string]interface{}{ + "type": "metrics/system", + }, + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + }, + }, + }) + + defaultConfig, ok := grouped["default"] + require.True(t, ok) + require.Equal(t, c1.Hash(), defaultConfig.Hash()) + + _, ok = grouped["infosec1"] + + require.False(t, ok) + }) + + t.Run("default and named output", func(t *testing.T) { + sConfig := map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + }, + map[string]interface{}{ + "type": "metrics/system", + }, + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "infosec1", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + grouped, err := groupByOutputs(ast) + require.NoError(t, err) + require.Equal(t, 2, len(grouped)) + + c1 := transpiler.MustNewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/hello.log", + }, + map[string]interface{}{ + "type": "metrics/system", + }, + }, + }) + + c2, _ := transpiler.NewAST(map[string]interface{}{ + "output": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{ + map[string]interface{}{ + "type": "log", + "path": "/var/log/infosec.log", + "output": map[string]interface{}{ + "use_output": "infosec1", + "pipeline": "custompipeline", + "index_name": "myindex", + }, + }, + }, + }) + + defaultConfig, ok := grouped["default"] + require.True(t, ok) + require.Equal(t, c1.Hash(), defaultConfig.Hash()) + + infosec1Config, ok := grouped["infosec1"] + + require.True(t, ok) + require.Equal(t, c2.Hash(), infosec1Config.Hash()) + }) + + 
t.Run("streams is an empty list", func(t *testing.T) { + sConfig := map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + "streams": []map[string]interface{}{}, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + grouped, err := groupByOutputs(ast) + require.NoError(t, err) + require.Equal(t, 0, len(grouped)) + }) + + t.Run("no streams are defined", func(t *testing.T) { + sConfig := map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "xxx", + "username": "myusername", + "password": "mypassword", + }, + "infosec1": map[string]interface{}{ + "type": "elasticsearch", + "hosts": "yyy", + "username": "anotherusername", + "password": "anotherpassword", + }, + }, + } + + ast, err := transpiler.NewAST(sConfig) + require.NoError(t, err) + + grouped, err := groupByOutputs(ast) + require.NoError(t, err) + require.Equal(t, 0, len(grouped)) + }) +} + +func TestConfiguration(t *testing.T) { + testcases := map[string]struct { + programs []string + expected int + err bool + }{ + "single_config": { + programs: []string{"filebeat", "metricbeat"}, + expected: 2, + }, + // "audit_config": { + // programs: []string{"auditbeat"}, + // expected: 1, + // }, + // "journal_config": { + // programs: []string{"journalbeat"}, + // expected: 1, + // }, + // "monitor_config": { + // programs: []string{"heartbeat"}, + // expected: 1, + // }, + "enabled_true": { + programs: []string{"filebeat"}, + expected: 1, + }, + "enabled_false": { + expected: 0, + }, + "enabled_output_true": { + programs: []string{"filebeat"}, + expected: 1, + }, + "enabled_output_false": { + expected: 0, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + singleConfig, err := ioutil.ReadFile(filepath.Join("testdata", name+".yml")) + require.NoError(t, err) + + var m map[string]interface{} + err = yaml.Unmarshal(singleConfig, &m) + require.NoError(t, err) + + ast, err := transpiler.NewAST(m) + require.NoError(t, err) + + programs, err := Programs(ast) + if test.err { + require.Error(t, err) + return + } + require.NoError(t, err) + + require.Equal(t, 1, len(programs)) + + defPrograms, ok := programs["default"] + require.True(t, ok) + require.Equal(t, test.expected, len(defPrograms)) + + for _, program := range defPrograms { + programConfig, err := ioutil.ReadFile(filepath.Join( + "testdata", + name+"-"+strings.ToLower(program.Spec.Name)+".yml", + )) + + require.NoError(t, err) + var m map[string]interface{} + err = yamltest.FromYAML(programConfig, &m) + require.NoError(t, err) + + compareMap := &transpiler.MapVisitor{} + program.Config.Accept(compareMap) + + if !assert.True(t, cmp.Equal(m, compareMap.Content)) { + diff := cmp.Diff(m, compareMap.Content) + if diff != "" { + t.Errorf("%s-%s mismatch (-want +got):\n%s", name, program.Spec.Name, diff) + } + } + } + }) + } +} diff --git a/x-pack/agent/pkg/agent/program/spec.go b/x-pack/agent/pkg/agent/program/spec.go new file mode 100644 index 00000000000..831406bf995 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/spec.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:generate go run internal/gen.go > supported.go
+
+package program
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
+)
+
+// ErrMissingWhen is returned when no boolean expression is defined for a program.
+var ErrMissingWhen = errors.New("program must define a 'When' expression")
+
+// Spec represents a specific program specification; it contains information about how to run the
+// program and also the rules to apply to the single configuration to create a specific program
+// configuration.
+//
+// NOTE: Current specs are built at compile time; we want to revisit that to allow other programs
+// to register their specs in a secure way.
+type Spec struct {
+	Name         string               `yaml:"name"`
+	Cmd          string               `yaml:"cmd"`
+	Configurable string               `yaml:"configurable"`
+	Args         []string             `yaml:"args"`
+	Rules        *transpiler.RuleList `yaml:"rules"`
+	When         string               `yaml:"when"`
+}
+
+// ReadSpecs reads all the specs that match the provided globbing path.
+func ReadSpecs(path string) ([]Spec, error) {
+	var specs []Spec
+	files, err := filepath.Glob(path)
+	if err != nil {
+		return []Spec{}, errors.New(err, "could not include spec", errors.TypeConfig)
+	}
+
+	for _, f := range files {
+		b, err := ioutil.ReadFile(f)
+		if err != nil {
+			return []Spec{}, errors.New(err, fmt.Sprintf("could not read spec %s", f), errors.TypeConfig)
+		}
+
+		spec := Spec{}
+		if err := yaml.Unmarshal(b, &spec); err != nil {
+			return []Spec{}, errors.New(err, fmt.Sprintf("could not unmarshal YAML for file %s", f), errors.TypeConfig)
+		}
+		specs = append(specs, spec)
+	}
+
+	return specs, nil
+}
+
+// NewSpecFromBytes creates a Spec from bytes.
+func NewSpecFromBytes(b []byte) (Spec, error) {
+	spec := Spec{}
+	if err := yaml.Unmarshal(b, &spec); err != nil {
+		return Spec{}, errors.New(err, "could not unmarshal YAML", errors.TypeConfig)
+	}
+	return spec, nil
+}
+
+// MustReadSpecs reads specs and panics on error.
+func MustReadSpecs(path string) []Spec {
+	s, err := ReadSpecs(path)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// FindSpecByName finds a spec by name, returning it and true, or false when it
+// cannot be found.
+func FindSpecByName(name string) (Spec, bool) {
+	for _, candidate := range Supported {
+		if name == candidate.Name {
+			return candidate, true
+		}
+	}
+	return Spec{}, false
+}
diff --git a/x-pack/agent/pkg/agent/program/spec_test.go b/x-pack/agent/pkg/agent/program/spec_test.go
new file mode 100644
index 00000000000..67fcb6ac759
--- /dev/null
+++ b/x-pack/agent/pkg/agent/program/spec_test.go
@@ -0,0 +1,115 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package program + +import ( + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler" +) + +func TestSerialization(t *testing.T) { + spec := Spec{ + Name: "hello", + Cmd: "hellocmd", + Configurable: "file", + Args: []string{"-c", "first"}, + Rules: transpiler.NewRuleList( + transpiler.Copy("inputs", "filebeat"), + transpiler.Filter("filebeat", "output", "keystore"), + transpiler.Rename("filebeat", "notfilebeat"), + transpiler.Translate("type", map[string]interface{}{ + "event/file": "log", + "event/stdin": "stdin", + }), + transpiler.TranslateWithRegexp("type", regexp.MustCompile("^metric/(.+)"), "$1/hello"), + transpiler.Map("inputs", + transpiler.Translate("type", map[string]interface{}{ + "event/file": "log", + })), + transpiler.FilterValues( + "inputs", + "type", + "log", + ), + ), + When: "1 == 1", + } + yml := `name: hello +cmd: hellocmd +configurable: file +args: +- -c +- first +rules: +- copy: + from: inputs + to: filebeat +- filter: + selectors: + - filebeat + - output + - keystore +- rename: + from: filebeat + to: notfilebeat +- translate: + path: type + mapper: + event/file: log + event/stdin: stdin +- translate_with_regexp: + path: type + re: ^metric/(.+) + with: $1/hello +- map: + path: inputs + rules: + - translate: + path: type + mapper: + event/file: log +- filter_values: + selector: inputs + key: type + values: + - log +when: 1 == 1 +` + t.Run("serialization", func(t *testing.T) { + b, err := yaml.Marshal(spec) + require.NoError(t, err) + assert.Equal(t, string(b), yml) + }) + + t.Run("deserialization", func(t *testing.T) { + s := Spec{} + err := yaml.Unmarshal([]byte(yml), &s) + require.NoError(t, err) + assert.Equal(t, spec, s) + }) +} + +func TestExport(t *testing.T) { + dir, err := ioutil.TempDir("", "test_export") + require.NoError(t, err) + defer os.RemoveAll(dir) + + for _, spec := range Supported { + b, err := yaml.Marshal(spec) + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(dir, strings.ToLower(spec.Name)+".yml"), b, 0666) + require.NoError(t, err) + } +} diff --git a/x-pack/agent/pkg/agent/program/supported.go b/x-pack/agent/pkg/agent/program/supported.go new file mode 100644 index 00000000000..9c43d45008d --- /dev/null +++ b/x-pack/agent/pkg/agent/program/supported.go @@ -0,0 +1,33 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by x-pack/dev-tools/cmd/buildspec/buildspec.go - DO NOT EDIT. 
+ +package program + +import ( + "strings" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/packer" +) + +var Supported []Spec +var SupportedMap map[string]bool + +func init() { + // Packed Files + // spec/filebeat.yml + // spec/metricbeat.yml + unpacked := packer.MustUnpack("eJyklU2TozYQhu/5GXNNKuFjmSpStQfDBAHD4DW2JaEbkhyBLQE1YDOQyn9PgT/GdmZ2D3uiaKTutxu9j/55aOoN++PvQm7oJmt/75V8+POBKq8lq0okSjYEhTLFi+fU0HfzwlEZepOpgjmf1TlTfJgXDg0K3QuKTgRlLLkPu0jJhi4tSZVXUAB33xDJqR/Lac392jKRFDtNihMZKbhPUdgQtLCJ8hpmrIvInRXR+vikyNuniEuK4J67VkuNRH7DomXA22a9riiAkrtBE7hBmyzHZ9imyMqJAVuCLO06P/dDnSxD7qr4QBWpiQn76duqEsQMu7Sre47eJBuq52jptCmePbrFTASu06U4qeaFM3A/lCnSh1P8sj5wx3dNcD/MaRlPvc1FJY7xmQh8J+dAPAZ+YjGwfr7E3bGO0xEcnGpd4pKrsWd7mv+8cBpqsKt9MxEAaEw9mrFGgNxP+kBS0+u67kwQTCQtFwcO4m5eOFpqzj7JA3fsqRIcJd2dlrH2gQB7mxmwnxfOjhrxK8HBXZ63A+ntnqCkZro9UGCbFNjlvHCOse5W1/gtMmOLGW8HMlSCmXLcU7qFJoiSDQewx+Z4juD7jP1YUgC3HNj9B//jleDd7Yz9u/2uJo41Jg1t4B9n9/4Od2x21Bktr2eliXP/72tDSYAcrnIde+mqNgDxgfm78/lRTNntB3o1OlRTr2fPRctxVlZO0fp+77sPXevDPBmyOo6T4Xp+5zPIDNgQFGvUDIdzLFpee/sSO3CcdBwvLn1nBrSYmRyYWj9zI5d0W4kl8IYlHn38UoW4pVc97M86MNbqwE1V8FeeM23sCQ6jb8lSlNSEGvOhFvadCA3YpDjWMhQPBHl9aogycmclU3CX4ZcymrTx1xSR13TJmsDlDTX4wIE3ZC6rXfH168NvR6ypTftasA/AtkJQY0puTyDbUrR4DIAuuR/WqXECHg5LNlQC9ReYDAQnOnOtmgJtP/6YVL1J8qSrFL0NZHkDnvNajSC9o8DTyI+AqPScKq8kSB8NuqfI3pGV/iXCTp4aTUvGWnjxPSC+58dJz9G6wIt6Sw1LZYjrbPq2fgwA7xl4eWYlbAh+efzQYLptZjipsBlKYsAv7OlkGNDKzaoS3JcdWZwOk1o/Bp7VEpz0GXo5REXzA3NqgiKvuxxG4GnZ02j4pCfIa89geNd3NlTSp8gaplnCT/S5N4DWNtiR1/C61/r8ZC++HQHxa1Q09fVabshJV3gDaU2w6RLRb4EOSE8N7SOgT2Yc/+V4OYy9p4bdbVbneZ73xRpTMKf45TS75DAvnJPOuCDI08a997VJGR7oasx/dZ4/AcJZA5uARGrqJ5Jt/w/SGzje+OTnoHCncby8dQrgz4HhCIPjhdJPMPg+GP795b8AAAD//2tuClk=") + SupportedMap = make(map[string]bool) + + for f, v := range unpacked { + s, err := NewSpecFromBytes(v) + if err != nil { + panic("Cannot read spec from " + f) + } + Supported = append(Supported, s) + SupportedMap[strings.ToLower(s.Name)] = true + } +} diff --git a/x-pack/agent/pkg/agent/program/testdata/audit_config-auditbeat.yml b/x-pack/agent/pkg/agent/program/testdata/audit_config-auditbeat.yml new file mode 100644 index 00000000000..cd4463f674b --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/audit_config-auditbeat.yml @@ -0,0 +1,22 @@ +auditbeat: + modules: + - module: auditd + resolve_ids: true + failure_mode: silent + backlog_limit: 8196 + rate_limit: 0 + include_raw_message: false + include_warnings: false + audit_rule_files: ["${path.config}/audit.rules.d/*.conf"] + - module: file_integrity + paths: + - /bin + - /usr/bin + - /sbin + - /usr/sbin + - /etc +output: + elasticsearch: + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/audit_config.yml b/x-pack/agent/pkg/agent/program/testdata/audit_config.yml new file mode 100644 index 00000000000..fbc963c9735 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/audit_config.yml @@ -0,0 +1,28 @@ +outputs: + default: + type: elasticsearch + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme +streams: + - type: log/file + ignore_older: 123s + - type: audit/auditd + resolve_ids: true + failure_mode: silent + backlog_limit: 8196 + rate_limit: 0 + include_raw_message: false + include_warnings: false + audit_rule_files: ["${path.config}/audit.rules.d/*.conf"] + - type: audit/file_integrity + paths: + - /bin + - /usr/bin + - /sbin + - /usr/sbin + - /etc +management: + host: "localhost" +config: + reload: 123 diff --git 
a/x-pack/agent/pkg/agent/program/testdata/enabled_false.yml b/x-pack/agent/pkg/agent/program/testdata/enabled_false.yml new file mode 100644 index 00000000000..08ef6b70cd9 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/enabled_false.yml @@ -0,0 +1,16 @@ +streams: + - type: event/file + enabled: false + paths: + - /var/log/hello1.log + - /var/log/hello2.log +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/enabled_output_false.yml b/x-pack/agent/pkg/agent/program/testdata/enabled_output_false.yml new file mode 100644 index 00000000000..94dc21d8bc8 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/enabled_output_false.yml @@ -0,0 +1,16 @@ +streams: + - type: event/file + paths: + - /var/log/hello1.log + - /var/log/hello2.log +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + enabled: false + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/x-pack/agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml new file mode 100644 index 00000000000..59ea4790801 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml @@ -0,0 +1,14 @@ +filebeat: + inputs: + - type: log + paths: + - /var/log/hello1.log + - /var/log/hello2.log +output: + elasticsearch: + enabled: true + hosts: + - 127.0.0.1:9200 + - 127.0.0.1:9300 + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/enabled_output_true.yml b/x-pack/agent/pkg/agent/program/testdata/enabled_output_true.yml new file mode 100644 index 00000000000..9b3fb3c065a --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/enabled_output_true.yml @@ -0,0 +1,16 @@ +streams: + - type: event/file + paths: + - /var/log/hello1.log + - /var/log/hello2.log +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + enabled: true + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/enabled_true-filebeat.yml b/x-pack/agent/pkg/agent/program/testdata/enabled_true-filebeat.yml new file mode 100644 index 00000000000..d5577f79cff --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/enabled_true-filebeat.yml @@ -0,0 +1,14 @@ +filebeat: + inputs: + - type: log + enabled: true + paths: + - /var/log/hello1.log + - /var/log/hello2.log +output: + elasticsearch: + hosts: + - 127.0.0.1:9200 + - 127.0.0.1:9300 + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/enabled_true.yml b/x-pack/agent/pkg/agent/program/testdata/enabled_true.yml new file mode 100644 index 00000000000..ddbcb2929df --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/enabled_true.yml @@ -0,0 +1,16 @@ +streams: + - type: event/file + enabled: true + paths: + - /var/log/hello1.log + - /var/log/hello2.log +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/journal_config-journalbeat.yml b/x-pack/agent/pkg/agent/program/testdata/journal_config-journalbeat.yml new file mode 100644 index 
00000000000..60e6261fc0e --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/journal_config-journalbeat.yml @@ -0,0 +1,15 @@ +journalbeat: + inputs: + - type: log/journal + paths: [] + backoff: 1s + max_backoff: 20s + seek: cursor + cursor_seek_fallback: head + include_matches: [] + save_remote_hostname: false +output: + elasticsearch: + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/journal_config.yml b/x-pack/agent/pkg/agent/program/testdata/journal_config.yml new file mode 100644 index 00000000000..732ebab6fb2 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/journal_config.yml @@ -0,0 +1,21 @@ +streams: + - type: log/journal + paths: [] + backoff: 1s + max_backoff: 20s + seek: cursor + cursor_seek_fallback: head + include_matches: [] + save_remote_hostname: false + - type: log/file + ignore_older: 123s +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/monitor_config-heartbeat.yml b/x-pack/agent/pkg/agent/program/testdata/monitor_config-heartbeat.yml new file mode 100644 index 00000000000..59feb756885 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/monitor_config-heartbeat.yml @@ -0,0 +1,18 @@ +heartbeat: + monitors: + - type: icmp + name: icmp + schedule: "*/5 * * * * * *" + hosts: ["localhost"] + ipv4: true + ipv6: true + mode: any + timeout: 16s + wait: 1s + - type: tcp + name: tcp +output: + elasticsearch: + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/monitor_config.yml b/x-pack/agent/pkg/agent/program/testdata/monitor_config.yml new file mode 100644 index 00000000000..97e5d9d8980 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/monitor_config.yml @@ -0,0 +1,23 @@ +streams: +- type: monitor/icmp + name: icmp + schedule: '*/5 * * * * * *' + hosts: ["localhost"] + ipv4: true + ipv6: true + mode: any + timeout: 16s + wait: 1s +- type: log/file +- type: monitor/tcp + name: tcp +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/single_config-filebeat.yml b/x-pack/agent/pkg/agent/program/testdata/single_config-filebeat.yml new file mode 100644 index 00000000000..257b38017f0 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/single_config-filebeat.yml @@ -0,0 +1,13 @@ +filebeat: + inputs: + - type: log + paths: + - /var/log/hello1.log + - /var/log/hello2.log +output: + elasticsearch: + hosts: + - 127.0.0.1:9200 + - 127.0.0.1:9300 + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/single_config-metricbeat.yml b/x-pack/agent/pkg/agent/program/testdata/single_config-metricbeat.yml new file mode 100644 index 00000000000..8d091fcdd60 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -0,0 +1,11 @@ +metricbeat: + modules: + - module: docker + setting: one + - module: apache + setting: two +output: + elasticsearch: + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/program/testdata/single_config.yml b/x-pack/agent/pkg/agent/program/testdata/single_config.yml new 
file mode 100644 index 00000000000..888a769b7f7 --- /dev/null +++ b/x-pack/agent/pkg/agent/program/testdata/single_config.yml @@ -0,0 +1,19 @@ +streams: + - type: metric/docker + setting: one + - type: metric/apache + setting: two + - type: event/file + paths: + - /var/log/hello1.log + - /var/log/hello2.log +management: + host: "localhost" +config: + reload: 123 +outputs: + default: + type: elasticsearch + hosts: [127.0.0.1:9200, 127.0.0.1:9300] + username: elastic + password: changeme diff --git a/x-pack/agent/pkg/agent/stateresolver/resolve.go b/x-pack/agent/pkg/agent/stateresolver/resolve.go new file mode 100644 index 00000000000..d9781bbd6d3 --- /dev/null +++ b/x-pack/agent/pkg/agent/stateresolver/resolve.go @@ -0,0 +1,179 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package stateresolver + +import ( + "sort" + "strings" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/agent/pkg/release" +) + +//go:generate stringer -type=stateChange -linecomment=true + +const shortID = 8 + +// stateChange represents how a process is modified between configuration changes. +type stateChange uint8 + +const ( + startState stateChange = iota + 1 // START + updateState // UPDATE + unchangedState // UNCHANGED +) + +type id string + +// state represents the SHOULD state of the system; it contains a reference to the actual bundle of +// configuration received by the upstream call and keeps track of the last change executed on a program. + +// +// The possible changes are the following: +// start: the configuration is seen for the first time and a new process must be started. +// update: the process must be updated to run with the new configuration. +// unchanged: the process keeps running with the current configuration. +type state struct { + ID string + LastModified time.Time + Active map[string]active +} + +func (s *state) ShortID() string { + if len(s.ID) <= shortID { + return s.ID + } + return s.ID[0:shortID] +} + +func (s *state) String() string { + var str strings.Builder + str.WriteString("ID:" + s.ID + ", LastModified: " + s.LastModified.String()) + str.WriteString("Active Process [\n") + for _, a := range s.Active { + str.WriteString(a.String()) + } + str.WriteString("]") + + return str.String() +} + +type active struct { + LastChange stateChange + LastModified time.Time + Identifier string + Program program.Program +} + +func (s *active) String() string { + return "Identifier: " + s.Identifier + + ", LastChange: " + s.LastChange.String() + + ", LastModified: " + s.LastModified.String() + + ", Checksum: " + s.Program.Checksum() +} + +type cfgReq interface { + ID() string + CreatedAt() time.Time + Programs() []program.Program +} + +// converge converges the system: it takes the current state and creates the new should state, along +// with all the steps required to go from the current state to the new state. +func converge(s state, cfg cfgReq) (state, []configrequest.Step) { + newState := state{ + ID: cfg.ID(), + LastModified: cfg.CreatedAt(), + Active: make(map[string]active, len(cfg.Programs())), + } + + steps := make([]configrequest.Step, 0) + + // Find processes that must be stopped.
+ activeKeys := getActiveKeys(s.Active) + for _, id := range activeKeys { + active := s.Active[id] + + var found bool + for _, p := range cfg.Programs() { + // Still need to run the process. + if id == p.Identifier() { + found = true + break + } + } + + if !found { + steps = append(steps, configrequest.Step{ + ID: configrequest.StepRemove, + Process: active.Program.Cmd(), + Version: release.Version(), + }) + } + } + + // What need to be started or updated. + for _, p := range cfg.Programs() { + a, found := s.Active[p.Identifier()] + if !found { + newState.Active[p.Identifier()] = active{ + LastChange: startState, + LastModified: cfg.CreatedAt(), + Identifier: p.Identifier(), + Program: p, + } + + steps = append(steps, configrequest.Step{ + ID: configrequest.StepRun, + Process: p.Cmd(), + Version: release.Version(), + Meta: map[string]interface{}{ + configrequest.MetaConfigKey: p.Configuration(), + }, + }) + + // Complete new process, skip to the next process. + continue + } + + // Checksum doesn't match and we force an update of the process. + if a.Program.Checksum() != p.Checksum() { + newState.Active[p.Identifier()] = active{ + LastChange: updateState, + LastModified: cfg.CreatedAt(), + Identifier: p.Identifier(), + Program: p, + } + steps = append(steps, configrequest.Step{ + ID: configrequest.StepRun, + Process: p.Cmd(), + Version: release.Version(), + Meta: map[string]interface{}{ + configrequest.MetaConfigKey: p.Configuration(), + }, + }) + } else { + // Configuration did not change in this loop so we keep + // the last configuration as is. + a.LastChange = unchangedState + newState.Active[p.Identifier()] = a + } + } + + // What need to be updated. + return newState, steps +} + +func getActiveKeys(aa map[string]active) []string { + keys := make([]string, 0, len(aa)) + for k := range aa { + keys = append(keys, k) + } + + sort.Strings(keys) + + return keys +} diff --git a/x-pack/agent/pkg/agent/stateresolver/resolve_test.go b/x-pack/agent/pkg/agent/stateresolver/resolve_test.go new file mode 100644 index 00000000000..5ae81e80f1d --- /dev/null +++ b/x-pack/agent/pkg/agent/stateresolver/resolve_test.go @@ -0,0 +1,381 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package stateresolver + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler" + "github.com/elastic/beats/v7/x-pack/agent/pkg/release" +) + +func TestResolver(t *testing.T) { + fb1 := fb("1") + fb2 := fb("2") + mb1 := mb("2") + tn := time.Now() + tn2 := time.Now().Add(time.Minute * 5) + + testcases := map[string]struct { + submit cfgReq + cur state + should state + steps []configrequest.Step + }{ + "from no programs to running program": { + submit: &cfg{ + id: "config-1", + createdAt: tn, + programs: []program.Program{ + fb1, mb1, + }, + }, + cur: state{}, // empty state + should: state{ + ID: "config-1", + LastModified: tn, + Active: map[string]active{ + "filebeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + "metricbeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + steps: []configrequest.Step{ + configrequest.Step{ + ID: configrequest.StepRun, + Process: fb1.Cmd(), + Version: release.Version(), + Meta: withMeta(fb1), + }, + configrequest.Step{ + ID: configrequest.StepRun, + Process: mb1.Cmd(), + Version: release.Version(), + Meta: withMeta(mb1), + }, + }, + }, + "adding a program to an already running system": { + submit: &cfg{ + id: "config-2", + createdAt: tn2, + programs: []program.Program{ + fb1, mb1, + }, + }, + cur: state{ + ID: "config-1", + LastModified: tn, + Active: map[string]active{ + "filebeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + }, + }, + should: state{ + ID: "config-2", + LastModified: tn2, + Active: map[string]active{ + "filebeat": active{ + LastChange: unchangedState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + "metricbeat": active{ + LastChange: startState, + LastModified: tn2, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + steps: []configrequest.Step{ + configrequest.Step{ + ID: configrequest.StepRun, + Process: mb1.Cmd(), + Version: release.Version(), + Meta: withMeta(mb1), + }, + }, + }, + "updating an already running program": { + submit: &cfg{ + id: "config-2", + createdAt: tn2, + programs: []program.Program{ + fb2, mb1, + }, + }, + cur: state{ + ID: "config-1", + LastModified: tn, + Active: map[string]active{ + "filebeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + }, + }, + should: state{ + ID: "config-2", + LastModified: tn2, + Active: map[string]active{ + "filebeat": active{ + LastChange: updateState, + LastModified: tn2, + Identifier: "filebeat", + Program: fb2, + }, + "metricbeat": active{ + LastChange: startState, + LastModified: tn2, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + steps: []configrequest.Step{ + configrequest.Step{ + ID: configrequest.StepRun, + Process: fb2.Cmd(), + Version: release.Version(), + Meta: withMeta(fb2), + }, + configrequest.Step{ + ID: configrequest.StepRun, + Process: mb1.Cmd(), + Version: release.Version(), + Meta: withMeta(mb1), + }, + }, + }, + "remove a running program and start a new one": { + submit: &cfg{ + id: "config-2", + createdAt: tn2, + programs: []program.Program{ + mb1, + }, + }, + cur: state{ + ID: "config-1", + LastModified: tn, + Active: 
map[string]active{ + "filebeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + }, + }, + should: state{ + ID: "config-2", + LastModified: tn2, + Active: map[string]active{ + "metricbeat": active{ + LastChange: startState, + LastModified: tn2, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + steps: []configrequest.Step{ + configrequest.Step{ + ID: configrequest.StepRemove, + Process: fb1.Cmd(), + Version: release.Version(), + }, + configrequest.Step{ + ID: configrequest.StepRun, + Process: mb1.Cmd(), + Version: release.Version(), + Meta: withMeta(mb1), + }, + }, + }, + "stops all runnings programs": { + submit: &cfg{ + id: "config-2", + createdAt: tn2, + programs: []program.Program{}, + }, + cur: state{ + ID: "config-1", + LastModified: tn, + Active: map[string]active{ + "filebeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + "metricbeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + should: state{ + ID: "config-2", + LastModified: tn2, + Active: map[string]active{}, + }, + steps: []configrequest.Step{ + configrequest.Step{ + ID: configrequest.StepRemove, + Process: fb1.Cmd(), + Version: release.Version(), + }, + configrequest.Step{ + ID: configrequest.StepRemove, + Process: mb1.Cmd(), + Version: release.Version(), + }, + }, + }, + "no changes detected": { + submit: &cfg{ + id: "config-1", + createdAt: tn, + programs: []program.Program{ + fb1, mb1, + }, + }, + cur: state{ + ID: "config-1", + LastModified: tn, + Active: map[string]active{ + "filebeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + "metricbeat": active{ + LastChange: startState, + LastModified: tn, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + should: state{ + ID: "config-1", + LastModified: tn, + Active: map[string]active{ + "filebeat": active{ + LastChange: unchangedState, + LastModified: tn, + Identifier: "filebeat", + Program: fb1, + }, + "metricbeat": active{ + LastChange: unchangedState, + LastModified: tn, + Identifier: "metricbeat", + Program: mb1, + }, + }, + }, + steps: []configrequest.Step{}, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + should, steps := converge(test.cur, test.submit) + + require.Equal(t, test.should.ID, should.ID) + require.Equal(t, test.should.LastModified, should.LastModified) + + require.Equal(t, len(test.steps), len(steps), "steps count don't match") + require.Equal(t, len(test.should.Active), len(should.Active), "active count don't match") + + for id, a := range test.should.Active { + compare := should.Active[id] + require.Equal(t, a.LastModified, compare.LastModified) + require.Equal(t, a.Identifier, compare.Identifier) + require.Equal(t, a.LastChange, compare.LastChange) + require.Equal(t, a.Program.Checksum(), compare.Program.Checksum()) + } + + if diff := cmp.Diff(test.steps, steps); diff != "" { + t.Errorf("converge() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +type cfg struct { + id string + createdAt time.Time + programs []program.Program +} + +func (c *cfg) ID() string { + return c.id +} + +func (c *cfg) Programs() []program.Program { + return c.programs +} + +func (c *cfg) CreatedAt() time.Time { + return c.createdAt +} + +func p(identifier, checksum string) program.Program { + s, ok := program.FindSpecByName(identifier) + if !ok { + panic("can't find spec with 
identifier " + identifier) + } + return program.Program{ + Spec: s, + Config: transpiler.MustNewAST(map[string]interface{}{ + s.Name: map[string]interface{}{ + "checksum": checksum, // make sure checksum is different between configuration change. + }, + }), + } +} + +func fb(checksum string) program.Program { + return p("Filebeat", checksum) +} + +func mb(checksum string) program.Program { + return p("Metricbeat", checksum) +} + +func withMeta(prog program.Program) map[string]interface{} { + return map[string]interface{}{ + configrequest.MetaConfigKey: prog.Configuration(), + } +} diff --git a/x-pack/agent/pkg/agent/stateresolver/statechange_string.go b/x-pack/agent/pkg/agent/stateresolver/statechange_string.go new file mode 100644 index 00000000000..53175471789 --- /dev/null +++ b/x-pack/agent/pkg/agent/stateresolver/statechange_string.go @@ -0,0 +1,30 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by "stringer -type=stateChange -linecomment=true"; DO NOT EDIT. + +package stateresolver + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[startState-1] + _ = x[updateState-2] + _ = x[unchangedState-3] +} + +const _stateChange_name = "STARTUPDATEUNCHANGE" + +var _stateChange_index = [...]uint8{0, 5, 11, 19} + +func (i stateChange) String() string { + i -= 1 + if i >= stateChange(len(_stateChange_index)-1) { + return "stateChange(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _stateChange_name[_stateChange_index[i]:_stateChange_index[i+1]] +} diff --git a/x-pack/agent/pkg/agent/stateresolver/stateresolver.go b/x-pack/agent/pkg/agent/stateresolver/stateresolver.go new file mode 100644 index 00000000000..bcc686b8380 --- /dev/null +++ b/x-pack/agent/pkg/agent/stateresolver/stateresolver.go @@ -0,0 +1,63 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package stateresolver + +import ( + "sync" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + uid "github.com/elastic/beats/v7/x-pack/agent/pkg/id" +) + +// Acker allow to ack the should state from a converge operation. +type Acker func() + +// StateResolver is a resolver of a config state change +// it subscribes to Config event and publishes StateChange events based on that/ +// Based on StateChange event operator know what to do. +type StateResolver struct { + l *logger.Logger + curState state + mu sync.Mutex +} + +// NewStateResolver allow to modify default event names. 
+func NewStateResolver(log *logger.Logger) (*StateResolver, error) { + return &StateResolver{ + l: log, + }, nil +} + +// Resolve resolves passed config into one or multiple steps +func (s *StateResolver) Resolve( + cfg configrequest.Request, +) (uid.ID, []configrequest.Step, Acker, error) { + s.mu.Lock() + defer s.mu.Unlock() + + newState, steps := converge(s.curState, cfg) + id, err := uid.Generate() + if err != nil { + return id, nil, nil, err + } + + s.l.Infof("New State ID is %s", newState.ShortID()) + s.l.Infof("Converging state requires execution of %d step(s)", len(steps)) + + // Allow the operator to ack the should state when applying the steps is done correctly. + ack := func() { + s.ack(newState) + } + + return id, steps, ack, nil +} + +func (s *StateResolver) ack(newState state) { + s.mu.Lock() + defer s.mu.Unlock() + s.l.Info("Updating internal state") + s.curState = newState +} diff --git a/x-pack/agent/pkg/agent/stateresolver/stateresolver_test.go b/x-pack/agent/pkg/agent/stateresolver/stateresolver_test.go new file mode 100644 index 00000000000..629e477f85e --- /dev/null +++ b/x-pack/agent/pkg/agent/stateresolver/stateresolver_test.go @@ -0,0 +1,63 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package stateresolver + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" +) + +func TestStateResolverAcking(t *testing.T) { + submit := &cfg{ + id: "config-1", + createdAt: time.Now(), + programs: []program.Program{ + fb("1"), mb("1"), + }, + } + + t.Run("when we ACK the should state", func(t *testing.T) { + log, _ := logger.New() + r, err := NewStateResolver(log) + require.NoError(t, err) + + // Current state is empty. + _, steps, ack, err := r.Resolve(submit) + require.NoError(t, err) + require.Equal(t, 2, len(steps)) + + // Ack the should state. + ack() + + // Current sate is not empty lets try to resolve the same configuration. + _, steps, ack, err = r.Resolve(submit) + require.NoError(t, err) + require.Equal(t, 0, len(steps)) + }) + + t.Run("when we don't ACK the should state", func(t *testing.T) { + log, _ := logger.New() + r, err := NewStateResolver(log) + require.NoError(t, err) + + // Current state is empty. + _, steps1, _, err := r.Resolve(submit) + require.NoError(t, err) + require.Equal(t, 2, len(steps1)) + + // We didn't ACK the should state, verify that resolve produce the same output. + _, steps2, _, err := r.Resolve(submit) + require.NoError(t, err) + require.Equal(t, 2, len(steps2)) + + assert.Equal(t, steps1, steps2) + }) +} diff --git a/x-pack/agent/pkg/agent/storage/storage.go b/x-pack/agent/pkg/agent/storage/storage.go new file mode 100644 index 00000000000..1e910b4ddec --- /dev/null +++ b/x-pack/agent/pkg/agent/storage/storage.go @@ -0,0 +1,271 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
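A hedged usage sketch of the resolver defined above (the apply helper and its error handling are illustrative assumptions, not part of this patch; the Resolve and ack contract is as shown in stateresolver.go and its test):

package main

import (
	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/configrequest"
	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/stateresolver"
	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
)

func apply(req configrequest.Request) error {
	log, err := logger.New()
	if err != nil {
		return err
	}
	r, err := stateresolver.NewStateResolver(log)
	if err != nil {
		return err
	}

	_, steps, ack, err := r.Resolve(req)
	if err != nil {
		return err
	}
	for range steps {
		// Hand each configrequest.Step to the operator here.
	}
	// Only commit the new should state once every step succeeded;
	// resolving the same request again then produces zero steps.
	ack()
	return nil
}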
+ +package storage + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/elastic/beats/v7/libbeat/common/file" + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/crypto" +) + +const perms = 0600 + +type store interface { + Save(io.Reader) error +} + +type load interface { + Load() (io.ReadCloser, error) +} + +// NullStore is only used to split the work into multiple PRs. +type NullStore struct{} + +// Save takes the fleetConfig and persists it; it will return an error on failure. +func (m *NullStore) Save(_ io.Reader) error { + return nil +} + +type handlerFunc func(io.Reader) error + +// HandlerStore takes a function handler and wraps it into the store interface. +type HandlerStore struct { + fn handlerFunc +} + +// NewHandlerStore takes a function and wraps it into a HandlerStore. +func NewHandlerStore(fn handlerFunc) *HandlerStore { + return &HandlerStore{fn: fn} +} + +// Save calls the handler. +func (h *HandlerStore) Save(in io.Reader) error { + return h.fn(in) +} + +// ReplaceOnSuccessStore takes a target file, a replacement content and a wrapped store. This +// store is useful if you want to trigger an action to replace another file when the wrapped store save method +// is successful. This store will take care of making a backup copy of the target file and will not +// overwrite the content of the target if the target already has the same content. If an error happens, +// we will not replace the file. +type ReplaceOnSuccessStore struct { + target string + replaceWith []byte + + wrapped store +} + +// NewReplaceOnSuccessStore takes a target file and a replacement content and will replace the target +// file content if the wrapped store execution is done without any error. +func NewReplaceOnSuccessStore(target string, replaceWith []byte, wrapped store) *ReplaceOnSuccessStore { + return &ReplaceOnSuccessStore{ + target: target, + replaceWith: replaceWith, + wrapped: wrapped, + } +} + +// Save will replace a target file with new content if the wrapped store is successful. +func (r *ReplaceOnSuccessStore) Save(in io.Reader) error { + // Get the original permissions; fail early if the target cannot be inspected. + s, err := os.Stat(r.target) + if err != nil { + return errors.New(err, + fmt.Sprintf("fail to stat %s", r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target)) + } + + // Ensure we can read the target file before delegating any call to the wrapped store. + target, err := ioutil.ReadFile(r.target) + if err != nil { + return errors.New(err, + fmt.Sprintf("fail to read content of %s", r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target)) + } + + err = r.wrapped.Save(in) + if err != nil { + return err + } + + if bytes.Equal(target, r.replaceWith) { + return nil + } + + // Windows is tricky with the characters permitted for the path and filename, so we have + // to remove any colon from the string. We are using nanosec precision here because of automated + // tools. + const fsSafeTs = "2006-01-02T15-04-05.9999" + + ts := time.Now() + backFilename := r.target + "." + ts.Format(fsSafeTs) + ".bak" + if err := file.SafeFileRotate(backFilename, r.target); err != nil { + return errors.New(err, + fmt.Sprintf("could not backup %s", r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target)) + } + + fd, err := os.OpenFile(r.target, os.O_CREATE|os.O_WRONLY, s.Mode()) + if err != nil { + // Roll back on any error to minimize a non-working state.
+ if err := file.SafeFileRotate(r.target, backFilename); err != nil { + return errors.New(err, + fmt.Sprintf("could not rollback %s to %s", backFilename, r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target), + errors.M("backup_path", backFilename)) + } + return errors.New(err, + fmt.Sprintf("could not open %s", r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target)) + } + defer fd.Close() + + if _, err := fd.Write(r.replaceWith); err != nil { + if err := file.SafeFileRotate(r.target, backFilename); err != nil { + return errors.New(err, + fmt.Sprintf("could not rollback %s to %s", backFilename, r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target), + errors.M("backup_path", backFilename)) + } + return errors.New(err, + fmt.Sprintf("could not write the new content to %s", r.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, r.target)) + } + + return nil +} + +// DiskStore takes a persistedConfig, saves it to a temporary file and then replaces the target file. +type DiskStore struct { + target string +} + +// NewDiskStore creates an unencrypted disk store. +func NewDiskStore(target string) *DiskStore { + return &DiskStore{target: target} +} + +// Save accepts a persistedConfig and saves it to the target file; to do so we +// write to a temporary file first and, if the write is successful, replace the target file with it. +func (d *DiskStore) Save(in io.Reader) error { + tmpFile := d.target + ".tmp" + + fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perms) + if err != nil { + return errors.New(err, + fmt.Sprintf("could not save to %s", tmpFile), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + + // Always clean up the temporary file and ignore errors. + defer os.Remove(tmpFile) + + if _, err := io.Copy(fd, in); err != nil { + return errors.New(err, "could not save content on disk", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + + if err := fd.Close(); err != nil { + return errors.New(err, "could not close temporary file", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + + if err := file.SafeFileRotate(d.target, tmpFile); err != nil { + return errors.New(err, + fmt.Sprintf("could not replace target file %s", d.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, d.target)) + } + + return nil +} + +// Load returns an io.ReadCloser for the target file. +func (d *DiskStore) Load() (io.ReadCloser, error) { + return os.OpenFile(d.target, os.O_RDONLY, perms) +} + +// EncryptedDiskStore saves the persisted configuration and encrypts the data on disk. +type EncryptedDiskStore struct { + target string + password []byte +} + +// NewEncryptedDiskStore creates an encrypted disk store. +func NewEncryptedDiskStore(target string, password []byte) *EncryptedDiskStore { + return &EncryptedDiskStore{target: target, password: password} +} + +// Save accepts a persistedConfig, encrypts it and saves it to the target file; to do so we +// write to a temporary file first and, if the write is successful, replace the target file with it. +func (d *EncryptedDiskStore) Save(in io.Reader) error { + const perms = 0600 + + tmpFile := d.target + ".tmp" + + fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perms) + if err != nil { + return errors.New(err, + fmt.Sprintf("could not save to %s", tmpFile), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + // Always clean up the temporary file and ignore errors.
+ defer os.Remove(tmpFile) + + w, err := crypto.NewWriterWithDefaults(fd, d.password) + if err != nil { + return errors.New(err, "could not encrypt the data to disk", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + + if _, err := io.Copy(w, in); err != nil { + return errors.New(err, "could not save content on disk", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + + if err := fd.Close(); err != nil { + return errors.New(err, "could not close temporary file", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, tmpFile)) + } + + if err := file.SafeFileRotate(d.target, tmpFile); err != nil { + return errors.New(err, + fmt.Sprintf("could not replace target file %s", d.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, d.target)) + } + + return nil +} + +// Load return a io.ReadCloser that will take care on unencrypting the data. +func (d *EncryptedDiskStore) Load() (io.ReadCloser, error) { + fd, err := os.OpenFile(d.target, os.O_RDONLY|os.O_CREATE, perms) + if err != nil { + return nil, errors.New(err, + fmt.Sprintf("could not open %s", d.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, d.target)) + } + + r, err := crypto.NewReaderWithDefaults(fd, d.password) + if err != nil { + fd.Close() + return nil, errors.New(err, + fmt.Sprintf("could not decode file %s", d.target), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, d.target)) + } + + return r, nil +} diff --git a/x-pack/agent/pkg/agent/storage/storage_test.go b/x-pack/agent/pkg/agent/storage/storage_test.go new file mode 100644 index 00000000000..1151397f6a2 --- /dev/null +++ b/x-pack/agent/pkg/agent/storage/storage_test.go @@ -0,0 +1,228 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package storage + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestReplaceOrRollbackStore(t *testing.T) { + in := bytes.NewReader([]byte{}) + + replaceWith := []byte("new content") + oldContent := []byte("old content") + + success := NewHandlerStore(func(_ io.Reader) error { return nil }) + failure := NewHandlerStore(func(_ io.Reader) error { return errors.New("fail") }) + + t.Run("when the save is successful with target and source don't match", func(t *testing.T) { + target, err := genFile(oldContent) + require.NoError(t, err) + dir := filepath.Dir(target) + defer os.RemoveAll(dir) + + requireFilesCount(t, dir, 1) + + s := NewReplaceOnSuccessStore( + target, + replaceWith, + success, + ) + + err = s.Save(in) + require.NoError(t, err) + + writtenContent, err := ioutil.ReadFile(target) + require.NoError(t, err) + + require.True(t, bytes.Equal(writtenContent, replaceWith)) + requireFilesCount(t, dir, 2) + }) + + t.Run("when save is not successful", func(t *testing.T) { + target, err := genFile(oldContent) + require.NoError(t, err) + dir := filepath.Dir(target) + defer os.RemoveAll(dir) + + requireFilesCount(t, dir, 1) + + s := NewReplaceOnSuccessStore( + target, + replaceWith, + failure, + ) + + err = s.Save(in) + require.Error(t, err) + + writtenContent, err := ioutil.ReadFile(target) + require.NoError(t, err) + + require.True(t, bytes.Equal(writtenContent, oldContent)) + requireFilesCount(t, dir, 1) + }) + + t.Run("when save is successful with target and source content match", func(t *testing.T) { + target, err := genFile(replaceWith) + require.NoError(t, err) + dir := filepath.Dir(target) + defer os.RemoveAll(dir) + + requireFilesCount(t, dir, 1) + + s := NewReplaceOnSuccessStore( + target, + replaceWith, + failure, + ) + + err = s.Save(in) + require.Error(t, err) + + writtenContent, err := ioutil.ReadFile(target) + require.NoError(t, err) + + require.True(t, bytes.Equal(writtenContent, replaceWith)) + requireFilesCount(t, dir, 1) + }) + + t.Run("when target file do not exist", func(t *testing.T) { + s := NewReplaceOnSuccessStore( + fmt.Sprintf("%s/%d", os.TempDir(), time.Now().Unix()), + replaceWith, + success, + ) + err := s.Save(in) + require.Error(t, err) + }) +} + +func TestDiskStore(t *testing.T) { + t.Run("when the target file already exists", func(t *testing.T) { + target, err := genFile([]byte("hello world")) + require.NoError(t, err) + defer os.Remove(target) + d := &DiskStore{target: target} + + msg := []byte("bonjour la famille") + err = d.Save(bytes.NewReader(msg)) + require.NoError(t, err) + + content, err := ioutil.ReadFile(target) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) + + t.Run("when the target do no exist", func(t *testing.T) { + dir, err := ioutil.TempDir("", "configs") + require.NoError(t, err) + defer os.Remove(dir) + + target := filepath.Join(dir, "hello.txt") + d := &DiskStore{target: target} + + msg := []byte("bonjour la famille") + err = d.Save(bytes.NewReader(msg)) + require.NoError(t, err) + + content, err := ioutil.ReadFile(target) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) + + t.Run("return an io.ReadCloser to the target file", func(t *testing.T) { + msg := []byte("bonjour la famille") + target, err := genFile(msg) + require.NoError(t, err) + + d := &DiskStore{target: target} + r, err := d.Load() + require.NoError(t, err) + defer r.Close() + + content, err := ioutil.ReadAll(r) + 
require.NoError(t, err) + require.Equal(t, msg, content) + }) +} + +func TestEncryptedDiskStore(t *testing.T) { + t.Run("when the target file already exists", func(t *testing.T) { + target, err := genFile([]byte("hello world")) + require.NoError(t, err) + defer os.Remove(target) + d := &EncryptedDiskStore{target: target} + + msg := []byte("bonjour la famille") + err = d.Save(bytes.NewReader(msg)) + require.NoError(t, err) + + // let's read the file + nd := &EncryptedDiskStore{target: target} + r, err := nd.Load() + require.NoError(t, err) + + content, err := ioutil.ReadAll(r) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) + + t.Run("when the target does not exist", func(t *testing.T) { + dir, err := ioutil.TempDir("", "configs") + require.NoError(t, err) + defer os.Remove(dir) + + target := filepath.Join(dir, "hello.txt") + d := &DiskStore{target: target} + + msg := []byte("bonjour la famille") + err = d.Save(bytes.NewReader(msg)) + require.NoError(t, err) + + content, err := ioutil.ReadFile(target) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) +} + +func genFile(b []byte) (string, error) { + dir, err := ioutil.TempDir("", "configs") + if err != nil { + return "", err + } + + f, err := ioutil.TempFile(dir, "config-") + if err != nil { + return "", err + } + if _, err := f.Write(b); err != nil { + return "", err + } + name := f.Name() + if err := f.Close(); err != nil { + return "", err + } + + return name, nil +} + +func requireFilesCount(t *testing.T, dir string, l int) { + files, err := ioutil.ReadDir(dir) + require.NoError(t, err) + require.Equal(t, l, len(files)) +} diff --git a/x-pack/agent/pkg/agent/transpiler/ast.go b/x-pack/agent/pkg/agent/transpiler/ast.go new file mode 100644 index 00000000000..f7170a91642 --- /dev/null +++ b/x-pack/agent/pkg/agent/transpiler/ast.go @@ -0,0 +1,769 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package transpiler + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +const selectorSep = "." + +// Selector defines a path to access an element in the Tree; currently selectors only work when the +// target is a Dictionary, and accessing list values is not currently supported by any methods using +// selectors. +type Selector = string + +var ( + trueVal = []byte{1} + falseVal = []byte{0} +) + +// Node represents a node in the configuration Tree; a Node can point to one or multiple children +// nodes. +type Node interface { + fmt.Stringer + + // Find searches for a key in the current node. + Find(string) (Node, bool) + + // Value returns the value of the node. + Value() interface{} + + // Clone clones the current node. + Clone() Node + + // Hash computes a sha256 hash of the current node and recursively calls any children. + Hash() []byte +} + +// AST represents a raw configuration which is purely data; only primitives are currently supported: +// int, float, string and bool. Complex types are not taken into consideration. The Tree allows defining +// operations on the retrieved options in a more structured way. We are using this new structure to +// create filtering rules or manipulation rules to convert a configuration to another format.
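A minimal sketch of building and comparing ASTs with the API that follows (illustrative only, not part of this patch; the sample map is an assumption):

package main

import (
	"fmt"

	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
)

func main() {
	ast, err := transpiler.NewAST(map[string]interface{}{
		"output": map[string]interface{}{
			"elasticsearch": map[string]interface{}{
				"hosts": []string{"127.0.0.1:9200"},
			},
		},
	})
	if err != nil {
		panic(err)
	}

	clone := ast.Clone()
	fmt.Println(ast.Equal(clone)) // true: equality is a sha256 hash comparison
	fmt.Println(ast.HashStr())    // base64 URL-encoded hash of the whole tree
}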
+type AST struct { + root Node +} + +func (a *AST) String() string { + return "{AST:" + a.root.String() + "}" +} + +// Dict represents a dictionary in the Tree, where each key is an entry in an array. The Dict will +// keep the ordering. +type Dict struct { + value []Node +} + +// Find takes a string which is a key and tries to find the element in the associated K/V. +func (d *Dict) Find(key string) (Node, bool) { + for _, i := range d.value { + if i.(*Key).name == key { + return i, true + } + } + return nil, false +} + +func (d *Dict) String() string { + var sb strings.Builder + for i := 0; i < len(d.value); i++ { + sb.WriteString("{") + sb.WriteString(d.value[i].String()) + sb.WriteString("}") + if i < len(d.value)-1 { + sb.WriteString(",") + } + } + return sb.String() +} + +// Value returns the value of the dict, which is a slice of nodes. +func (d *Dict) Value() interface{} { + return d.value +} + +// Clone clones the values and returns a new dictionary. +func (d *Dict) Clone() Node { + nodes := make([]Node, 0, len(d.value)) + for _, i := range d.value { + nodes = append(nodes, i.Clone()) + } + return &Dict{value: nodes} +} + +// Hash computes a sha256 hash of the current node and recursively calls any children. +func (d *Dict) Hash() []byte { + h := sha256.New() + for _, v := range d.value { + h.Write(v.Hash()) + } + return h.Sum(nil) +} + +// Key represents a Key / value pair in the dictionary. +type Key struct { + name string + value Node +} + +func (k *Key) String() string { + var sb strings.Builder + sb.WriteString(k.name) + sb.WriteString(":") + if k.value == nil { + sb.WriteString("nil") + } else { + sb.WriteString(k.value.String()) + } + return sb.String() +} + +// Find finds a key in a Dictionary or a list. +func (k *Key) Find(key string) (Node, bool) { + switch v := k.value.(type) { + case *Dict: + return v.Find(key) + case *List: + return v.Find(key) + default: + return nil, false + } +} + +// Value returns the raw value. +func (k *Key) Value() interface{} { + return k.value +} + +// Clone returns a clone of the current key and its embedded values. +func (k *Key) Clone() Node { + if k.value != nil { + return &Key{name: k.name, value: k.value.Clone()} + } + + return &Key{name: k.name, value: nil} +} + +// Hash computes a sha256 hash of the current node and recursively calls any children. +func (k *Key) Hash() []byte { + h := sha256.New() + h.Write([]byte(k.name)) + if k.value != nil { + h.Write(k.value.Hash()) + } + return h.Sum(nil) +} + +// List represents a slice in our Tree. +type List struct { + value []Node +} + +func (l *List) String() string { + var sb strings.Builder + for i := 0; i < len(l.value); i++ { + sb.WriteString("[") + sb.WriteString(l.value[i].String()) + sb.WriteString("]") + if i < len(l.value)-1 { + sb.WriteString(",") + } + } + return sb.String() +} + +// Hash computes a sha256 hash of the current node and recursively calls any children. +func (l *List) Hash() []byte { + h := sha256.New() + for _, v := range l.value { + h.Write(v.Hash()) + } + + return h.Sum(nil) +} + +// Find takes an index and returns the value at that index. +func (l *List) Find(idx string) (Node, bool) { + i, err := strconv.Atoi(idx) + if err != nil { + return nil, false + } + if i >= len(l.value) || i < 0 { + return nil, false + } + + return l.value[i], true +} + +// Value returns the raw value. +func (l *List) Value() interface{} { + return l.value +} + +// Clone returns a new list with clones of the items.
+func (l *List) Clone() Node { + nodes := make([]Node, 0, len(l.value)) + for _, i := range l.value { + nodes = append(nodes, i.Clone()) + } + return &List{value: nodes} +} + +// StrVal represents a string. +type StrVal struct { + value string +} + +// Find receive a key and return false since the node is not a List or Dict. +func (s *StrVal) Find(key string) (Node, bool) { + return nil, false +} + +func (s *StrVal) String() string { + return s.value +} + +// Value returns the value. +func (s *StrVal) Value() interface{} { + return s.value +} + +// Clone clone the value. +func (s *StrVal) Clone() Node { + k := *s + return &k +} + +// Hash we return the byte slice of the string. +func (s *StrVal) Hash() []byte { + return []byte(s.value) +} + +// IntVal represents an int. +type IntVal struct { + value int +} + +// Find receive a key and return false since the node is not a List or Dict. +func (s *IntVal) Find(key string) (Node, bool) { + return nil, false +} + +func (s *IntVal) String() string { + return strconv.Itoa(s.value) +} + +// Value returns the value. +func (s *IntVal) Value() interface{} { + return s.value +} + +// Clone clone the value. +func (s *IntVal) Clone() Node { + k := *s + return &k +} + +// Hash we convert the value into a string and return the byte slice. +func (s *IntVal) Hash() []byte { + return []byte(s.String()) +} + +// UIntVal represents an int. +type UIntVal struct { + value uint64 +} + +// Find receive a key and return false since the node is not a List or Dict. +func (s *UIntVal) Find(key string) (Node, bool) { + return nil, false +} + +func (s *UIntVal) String() string { + return strconv.FormatUint(s.value, 10) +} + +// Value returns the value. +func (s *UIntVal) Value() interface{} { + return s.value +} + +// Clone clone the value. +func (s *UIntVal) Clone() Node { + k := *s + return &k +} + +// Hash we convert the value into a string and return the byte slice. +func (s *UIntVal) Hash() []byte { + return []byte(s.String()) +} + +// FloatVal represents a float. +// NOTE: We will convert float32 to a float64. +type FloatVal struct { + value float64 +} + +// Find receive a key and return false since the node is not a List or Dict. +func (s *FloatVal) Find(key string) (Node, bool) { + return nil, false +} + +func (s *FloatVal) String() string { + return fmt.Sprintf("%f", s.value) +} + +// Value return the raw value. +func (s *FloatVal) Value() interface{} { + return s.value +} + +// Clone clones the value. +func (s *FloatVal) Clone() Node { + k := *s + return &k +} + +// Hash return a string representation of the value, we try to return the minimal precision we can. +func (s *FloatVal) Hash() []byte { + return []byte(strconv.FormatFloat(s.value, 'f', -1, 64)) +} + +// BoolVal represents a boolean in our Tree. +type BoolVal struct { + value bool +} + +// Find receive a key and return false since the node is not a List or Dict. +func (s *BoolVal) Find(key string) (Node, bool) { + return nil, false +} + +func (s *BoolVal) String() string { + if s.value == true { + return "true" + } + return "false" +} + +// Value returns the value. +func (s *BoolVal) Value() interface{} { + return s.value +} + +// Clone clones the value. +func (s *BoolVal) Clone() Node { + k := *s + return &k +} + +// Hash returns a single byte to represent the boolean value. 
+func (s *BoolVal) Hash() []byte { + if s.value { + return trueVal + } + return falseVal +} + +// NewAST takes a map and converts it to an internal Tree, allowing us to execute rules on the +// data to shape it in a different way or to filter some of the information. +func NewAST(m map[string]interface{}) (*AST, error) { + val := reflect.ValueOf(m) + root, err := load(val) + if err != nil { + return nil, fmt.Errorf("could not parse configuration into a tree, error: %+v", err) + } + return &AST{root: root}, nil +} + +// MustNewAST creates a new AST based on a map[string]interface{} and panics on any error. +func MustNewAST(m map[string]interface{}) *AST { + v, err := NewAST(m) + if err != nil { + panic(err) + } + return v +} + +func load(val reflect.Value) (Node, error) { + val = lookupVal(val) + + switch val.Kind() { + case reflect.Map: + return loadMap(val) + case reflect.Slice, reflect.Array: + return loadSliceOrArray(val) + case reflect.String: + return &StrVal{value: val.Interface().(string)}, nil + case reflect.Int: + return &IntVal{value: val.Interface().(int)}, nil + case reflect.Int64: + return &IntVal{value: int(val.Interface().(int64))}, nil + case reflect.Uint: + return &UIntVal{value: uint64(val.Interface().(uint))}, nil + case reflect.Uint64: + return &UIntVal{value: val.Interface().(uint64)}, nil + case reflect.Float64: + return &FloatVal{value: val.Interface().(float64)}, nil + case reflect.Float32: + return &FloatVal{value: float64(val.Interface().(float32))}, nil + case reflect.Bool: + return &BoolVal{value: val.Interface().(bool)}, nil + default: + if val.IsNil() { + return nil, nil + } + return nil, fmt.Errorf("unknown type %T for %+v", val.Interface(), val) + } +} + +// Accept takes a visitor and will visit each node of the Tree while calling the right methods on +// the visitor. +// NOTE(ph): Some operations could be refactored to use a visitor; I plan to add a checksum visitor. +func (a *AST) Accept(visitor Visitor) { + a.dispatch(a.root, visitor) +} + +func (a *AST) dispatch(n Node, visitor Visitor) { + switch t := n.(type) { + case *Dict: + visitorDict := visitor.OnDict() + for _, child := range t.value { + key := child.(*Key) + visitorDict.OnKey(key.name) + subvisitor := visitorDict.Visitor() + a.dispatch(key.value, subvisitor) + visitorDict.OnValue(subvisitor) + } + visitorDict.OnComplete() + case *List: + visitorList := visitor.OnList() + for _, child := range t.value { + subvisitor := visitorList.Visitor() + a.dispatch(child, subvisitor) + visitorList.OnValue(subvisitor) + } + visitorList.OnComplete() + case *StrVal: + visitor.OnStr(t.value) + case *IntVal: + visitor.OnInt(t.value) + case *UIntVal: + visitor.OnUInt(t.value) + case *BoolVal: + visitor.OnBool(t.value) + case *FloatVal: + visitor.OnFloat(t.value) + } +} + +// Clone clones the object. +func (a *AST) Clone() *AST { + return &AST{root: a.root.Clone()} +} + +// Hash calculates a hash from all the included nodes in the tree. +func (a *AST) Hash() []byte { + return a.root.Hash() +} + +// HashStr returns the calculated hash as a base64 URL-encoded string. +func (a *AST) HashStr() string { + return base64.URLEncoding.EncodeToString(a.root.Hash()) +} + +// Equal checks if two ASTs are equal by comparing their computed hashes. +func (a *AST) Equal(other *AST) bool { + return bytes.Equal(a.Hash(), other.Hash()) +} + +// MarshalYAML defines how to marshal the Tree; it will convert the tree to a +// map[string]interface{}. +func (a *AST) MarshalYAML() (interface{}, error) { + m := &MapVisitor{} + a.Accept(m) + return m.Content, nil +} + +// MarshalJSON converts an AST to valid JSON.
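A hedged sketch of the visitor-based marshaling shown above; it assumes MapVisitor is exported with a Content field, as its use in MarshalYAML suggests, and that json.Marshal picks up (*AST).MarshalJSON:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
)

func main() {
	ast := transpiler.MustNewAST(map[string]interface{}{"timeout": 12})

	// MapVisitor walks the tree via Accept and rebuilds a plain map in Content;
	// this is the same mechanism MarshalYAML and MarshalJSON rely on.
	m := &transpiler.MapVisitor{}
	ast.Accept(m)
	fmt.Printf("%#v\n", m.Content)

	b, err := json.Marshal(ast) // goes through (*AST).MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}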
+func (a *AST) MarshalJSON() ([]byte, error) { + m := &MapVisitor{} + a.Accept(m) + + b, err := json.Marshal(m.Content) + if err != nil { + return nil, err + } + + return b, nil +} + +func splitPath(s Selector) []string { + return strings.Split(s, selectorSep) +} + +func loadMap(val reflect.Value) (Node, error) { + node := &Dict{} + + mapKeys := val.MapKeys() + names := make([]string, 0, len(mapKeys)) + for _, aKey := range mapKeys { + names = append(names, aKey.Interface().(string)) + } + sort.Strings(names) + + for _, name := range names { + aValue, err := load(val.MapIndex(reflect.ValueOf(name))) + if err != nil { + return nil, err + } + + keys := strings.Split(name, selectorSep) + if !isDictOrKey(aValue) { + node.value = append(node.value, &Key{name: name, value: aValue}) + continue + } + + // get last known existing node + var lastKnownKeyIdx int + var knownNode Node = node + for i, k := range keys { + n, isDict := knownNode.Find(k) + if !isDict { + break + } + + lastKnownKeyIdx = i + knownNode = n + } + + // Produce remainder + restKeys := keys[lastKnownKeyIdx+1:] + restDict := &Dict{} + if len(restKeys) == 0 { + if avd, ok := aValue.(*Dict); ok { + restDict.value = avd.value + } else if avd, ok := aValue.(*Key); ok { + restDict.value = []Node{avd.value} + } else { + restDict.value = append(restDict.value, aValue) + } + } else { + for i := len(restKeys) - 1; i >= 0; i-- { + if len(restDict.value) == 0 { + // this is the first one + restDict.value = []Node{&Key{name: restKeys[i], value: aValue}} + continue + } + + restDict.value = []Node{&Key{name: restKeys[i], value: restDict.Clone()}} + } + } + + // Attach remainder to last known node + restKey := &Key{name: keys[lastKnownKeyIdx], value: restDict} + if knownNodeDict, ok := knownNode.(*Dict); ok { + knownNodeDict.value = append(knownNodeDict.value, restKey) + } else if knownNodeKey, ok := knownNode.(*Key); ok { + dict, ok := knownNodeKey.value.(*Dict) + if ok { + dict.value = append(dict.value, restDict.value...) + } + } + } + + return node, nil +} + +func isDictOrKey(val Node) bool { + if _, ok := val.(*Key); ok { + return true + } + if _, ok := val.(*Dict); ok { + return true + } + return false +} + +func loadSliceOrArray(val reflect.Value) (Node, error) { + node := &List{} + for i := 0; i < val.Len(); i++ { + aValue, err := load(val.Index(i)) + if err != nil { + return nil, err + } + node.value = append(node.value, aValue) + } + return node, nil +} + +func lookupVal(val reflect.Value) reflect.Value { + for (val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface) && !val.IsNil() { + val = val.Elem() + } + return val +} + +// Select takes an AST and a selector and will return a sub AST based on the selector path, will +// return false if the path could not be found. +func Select(a *AST, selector Selector) (*AST, bool) { + var appendTo []Node + + // Run through the graph and find matching nodes. + current := a.root + for _, part := range splitPath(selector) { + n, ok := current.Find(part) + if !ok { + return nil, false + } + + current = n + appendTo = append(appendTo, current) + } + + newAST := &Dict{} + d := newAST + for idx, n := range appendTo { + d.value = append(d.value, n) + // Prepare to add the next level. + if idx < len(appendTo)-1 { + node := n.(*Key) + subdict := &Dict{} + node.value = subdict + d = subdict + } + } + return &AST{root: newAST}, true +} + +// Lookup accept an AST and a selector and return the matching Node at that position. 
+func Lookup(a *AST, selector Selector) (Node, bool) { + // Run through the graph and find matching nodes. + current := a.root + for _, part := range splitPath(selector) { + n, ok := current.Find(part) + if !ok { + return nil, false + } + + current = n + } + + return current, true +} + +// Insert inserts a node into an existing AST; it will return an error if the target position cannot +// accept a new node. +func Insert(a *AST, node Node, to Selector) error { + current := a.root + for _, part := range splitPath(to) { + n, ok := current.Find(part) + if !ok { + switch t := current.(type) { + case *Dict: + newNode := &Key{name: part, value: &Dict{}} + t.value = append(t.value, newNode) + + sort.Slice(t.value, func(i, j int) bool { + return t.value[i].(*Key).name < t.value[j].(*Key).name + }) + + current = newNode + continue + default: + return fmt.Errorf("expecting Dict and received %T", t) + } + } + + current = n + } + + // Apply the current node and replace any existing elements + // that could exist after the selector. + d, ok := current.(*Key) + if !ok { + return fmt.Errorf("expecting Key and received %T", current) + } + + switch node.(type) { + case *List: + d.value = node + default: + d.value = &Dict{[]Node{node}} + } + return nil +} + +// Combine takes two ASTs and tries to combine both of them into a single AST; note that this operation +// is not a merge and will return an error if the positions to combine are not of compatible types or +// if a key is already present in the target AST. This method is useful if you use the Select method +// to create two different sub ASTs and want to merge them together again. +func Combine(a, b *AST) (*AST, error) { + newAST := &AST{} + if reflect.TypeOf(a.root) != reflect.TypeOf(b.root) { + return nil, fmt.Errorf("incompatible node type to combine, received %T and %T", a, b) + } + + switch t := a.root.(type) { + case *Dict: + newAST.root = t + for _, element := range b.root.Value().([]Node) { + key := element.(*Key) + _, ok := t.Find(key.name) + if ok { + return nil, fmt.Errorf("could not combine tree, key %s present in both trees", key.name) + } + t.value = append(t.value, key) + } + case *List: + newAST.root = t + t.value = append(t.value, b.root.(*List).value...) + } + + return newAST, nil +} + +// CompOp is the operation used for comparing counts in CountComp. +type CompOp func(actual int) bool + +// CountComp is a comparison operation which returns true if compareOp evaluates to true; +// compareOp is provided with the actual count of elements at the specified path. +func CountComp(ast *AST, selector Selector, compareOp CompOp) bool { + var actualCount int + node, ok := Lookup(ast, selector) + if ok { + switch t := node.Value().(type) { + case *Key: + actualCount = 1 + case *Dict: + actualCount = len(t.value) + case *List: + actualCount = len(t.value) + default: + actualCount = 1 + } + } + + return compareOp(actualCount) +} + +// Map transforms the AST into a map[string]interface{} and will abort and return an error on any +// failed type conversion.
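A short, illustrative sketch of the selector helpers defined in this file (Select, Lookup and the Map method below; the sample tree is an assumption, not part of this patch):

package main

import (
	"fmt"

	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/transpiler"
)

func main() {
	ast := transpiler.MustNewAST(map[string]interface{}{
		"output": map[string]interface{}{
			"elasticsearch": map[string]interface{}{"hosts": []string{"127.0.0.1:9200"}},
		},
	})

	// Select returns a sub-AST rooted at the dotted path; Lookup returns the raw Node.
	if sub, ok := transpiler.Select(ast, "output.elasticsearch"); ok {
		fmt.Println(sub.HashStr())
	}
	if node, ok := transpiler.Lookup(ast, "output.elasticsearch.hosts"); ok {
		fmt.Println(node.String())
	}

	// Map converts the tree back to a plain map[string]interface{}.
	m, err := ast.Map()
	if err != nil {
		panic(err)
	}
	fmt.Println(m)
}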
+func (a *AST) Map() (map[string]interface{}, error) { + m := &MapVisitor{} + a.Accept(m) + mapped, ok := m.Content.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("could not convert to map[string]iface, type is %T", m.Content) + } + return mapped, nil +} diff --git a/x-pack/agent/pkg/agent/transpiler/ast_test.go b/x-pack/agent/pkg/agent/transpiler/ast_test.go new file mode 100644 index 00000000000..d1ee76ea15f --- /dev/null +++ b/x-pack/agent/pkg/agent/transpiler/ast_test.go @@ -0,0 +1,1105 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package transpiler + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestAST(t *testing.T) { + testcases := map[string]struct { + hashmap map[string]interface{} + expectedMap map[string]interface{} + ast *AST + }{ + "simple slice/string": { + hashmap: map[string]interface{}{ + "inputs": []map[string]interface{}{ + map[string]interface{}{ + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + map[string]interface{}{ + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + }, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "integer as key": { + hashmap: map[string]interface{}{ + "1": []string{"/var/log/log1", "/var/log/log2"}, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "1", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + "support null (nil) values": { + hashmap: map[string]interface{}{ + "nil_v": nil, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "nil_v"}, + }, + }, + }, + }, + "support bool": { + hashmap: map[string]interface{}{ + "true_v": true, + "false_v": false, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "false_v", value: &BoolVal{value: false}}, + &Key{name: "true_v", value: &BoolVal{value: true}}, + }, + }, + }, + }, + "support integers": { + hashmap: map[string]interface{}{ + "timeout": 12, + "range": []int{20, 30, 40}, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "range", + value: &List{ + []Node{ + &IntVal{value: 20}, + &IntVal{value: 30}, + &IntVal{value: 40}, + }, + }, + }, + &Key{name: "timeout", value: &IntVal{value: 12}}, + }, + }, + }, + }, + "support unsigned integers": { + hashmap: map[string]interface{}{ + "timeout": 12, + "range": []uint64{20, 30, 40}, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "range", + value: &List{ + []Node{ + &UIntVal{value: uint64(20)}, + &UIntVal{value: uint64(30)}, + &UIntVal{value: uint64(40)}, + }, + }, + }, + &Key{name: "timeout", value: &IntVal{value: 12}}, + }, + }, + }, + }, + "support floats": { + hashmap: map[string]interface{}{ + "ratio": 0.5, + "range64": []float64{20.0, 30.0, 40.0}, + "range32": []float32{20.0, 30.0, 40.0}, + }, + ast: &AST{ 
+ root: &Dict{ + value: []Node{ + &Key{ + name: "range32", + value: &List{ + []Node{ + &FloatVal{value: 20.0}, + &FloatVal{value: 30.0}, + &FloatVal{value: 40.0}, + }, + }, + }, + &Key{ + name: "range64", + value: &List{ + []Node{ + &FloatVal{value: 20.0}, + &FloatVal{value: 30.0}, + &FloatVal{value: 40.0}, + }, + }, + }, + &Key{name: "ratio", value: &FloatVal{value: 0.5}}, + }, + }, + }, + }, + "Keys inside Keys with slices": { + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "ignore_older", value: &StrVal{value: "20s"}}, + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + &Key{name: "type", value: &StrVal{value: "log/docker"}}, + }, + }}, + }, + }, + }, + }, + "Keys with multiple levels of deeps": { + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + "outputs": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "ssl": map[string]interface{}{ + "certificates_authorities": []string{"abc1", "abc2"}, + }, + }, + }, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "ignore_older", value: &StrVal{value: "20s"}}, + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + &Key{name: "type", value: &StrVal{value: "log/docker"}}, + }, + }}, + &Key{ + name: "outputs", + value: &Dict{ + []Node{ + &Key{ + name: "elasticsearch", + value: &Dict{ + []Node{ + &Key{ + name: "ssl", + value: &Dict{ + []Node{ + &Key{name: "certificates_authorities", + value: &List{ + []Node{ + &StrVal{value: "abc1"}, + &StrVal{value: "abc2"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + "Keys with multiple levels of deeps with compact keys": { + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + "outputs.elasticsearch": map[string]interface{}{ + "ssl": map[string]interface{}{ + "certificates_authorities": []string{"abc1", "abc2"}, + }, + }, + }, + expectedMap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + "outputs": map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "ssl": map[string]interface{}{ + "certificates_authorities": []string{"abc1", "abc2"}, + }, + }, + }, + }, + ast: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "ignore_older", value: &StrVal{value: "20s"}}, + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + &Key{name: "type", value: &StrVal{value: "log/docker"}}, + }, + }}, + &Key{ + name: "outputs", + value: &Dict{ + []Node{ + &Key{ + name: "elasticsearch", + value: &Dict{ + []Node{ + &Key{ + name: "ssl", + value: &Dict{ + []Node{ + &Key{name: "certificates_authorities", + value: &List{ + []Node{ + &StrVal{value: "abc1"}, + &StrVal{value: "abc2"}, + }, + }, + }, + }, + }, + }, + 
}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + t.Run("MAP to AST", func(t *testing.T) { + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + v, err := NewAST(test.hashmap) + require.NoError(t, err) + if !assert.True(t, yamlComparer(test.ast, v)) { + diff := cmp.Diff(test.ast, v) + t.Logf("Mismatch (-want, +got)\n%s", diff) + } + }) + } + }) + + t.Run("AST to MAP", func(t *testing.T) { + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + visitor := &MapVisitor{} + test.ast.Accept(visitor) + + expectedMap := test.hashmap + if test.expectedMap != nil { + expectedMap = test.expectedMap + } + + if !assert.True(t, yamlComparer(expectedMap, visitor.Content)) { + diff := cmp.Diff(test.hashmap, visitor.Content) + t.Logf("Mismatch (-want, +got)\n%s", diff) + } + }) + } + }) +} + +func TestSelector(t *testing.T) { + testcases := map[string]struct { + hashmap map[string]interface{} + selector Selector + expected *AST + notFound bool + }{ + "two levels of keys": { + selector: "inputs.type", + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "type", value: &StrVal{value: "log/docker"}}, + }, + }}, + }, + }, + }, + }, + "three level of keys": { + selector: "inputs.ssl", + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + }, + }, + }, + }, + "indexed key access when it doesn't exist": { + selector: "inputs.paths.1", + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "type": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + }, + notFound: true, + }, + "integer in string for a key": { + selector: "inputs.1", + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "1": "log/docker", + "ignore_older": "20s", + "paths": []string{"/var/log/log1", "/var/log/log2"}, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "1", value: &StrVal{value: "log/docker"}}, + }, + }}, + }, + }, + }, + }, + "de-normalized hashmap": { + selector: "inputs.x.ssl", + hashmap: map[string]interface{}{ + "inputs.x": map[string]interface{}{ + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "x", value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + "de-normalized hashmap with duplicate prefix": { + 
selector: "inputs", + hashmap: map[string]interface{}{ + "inputs.x": map[string]interface{}{ + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + "inputs.y": map[string]interface{}{ + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "x", value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + &Key{name: "y", value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + "de-normalized hashmap with duplicate prefix, inputs.x.ssl selector": { + selector: "inputs.x.ssl", + hashmap: map[string]interface{}{ + "inputs.x": map[string]interface{}{ + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + "inputs.y": map[string]interface{}{ + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "x", value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + "de-normalized hashmap middle": { + selector: "inputs.x.ssl", + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "x.ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + "certificate": "/etc/ssl/my.crt", + }, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "x", value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + "de-normalized hashmap middle with collision": { + selector: "inputs.x.ssl", + hashmap: map[string]interface{}{ + "inputs": map[string]interface{}{ + "x": map[string]interface{}{ + "ssl": map[string]interface{}{ + "ca": []string{"ca1", "ca2"}, + }, + }, + "x.ssl": map[string]interface{}{ + "certificate": "/etc/ssl/my.crt", + }, + }, + }, + expected: &AST{ + root: &Dict{ + value: []Node{ + &Key{ + name: "inputs", + value: &Dict{ + []Node{ + &Key{name: "x", value: &Dict{ + []Node{ + &Key{name: "ssl", value: &Dict{ + []Node{ + &Key{name: "ca", value: &List{ + value: []Node{&StrVal{value: "ca1"}, &StrVal{value: "ca2"}}, + }}, + &Key{name: "certificate", value: &StrVal{value: "/etc/ssl/my.crt"}}, + }}}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + v, err := NewAST(test.hashmap) + require.NoError(t, err) + a, ok := Select(v, test.selector) + if test.notFound { + require.False(t, ok) + return + 
} + + require.True(t, ok) + if !assert.True(t, reflect.DeepEqual(test.expected, a)) { + t.Logf( + `received: %+v + expected: %+v`, v, test.expected) + } + }) + } +} + +func TestCount(t *testing.T) { + ast := &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + } + + result := CountComp(ast, "inputs", func(a int) bool { return a == 2 }) + assert.True(t, result) + + result = CountComp(ast, "inputs2", func(a int) bool { return a == 0 }) + assert.True(t, result) +} + +func TestMarshalerToYAML(t *testing.T) { + ast := &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + } + + b, err := yaml.Marshal(ast) + require.NoError(t, err) + + expected := []byte(`inputs: +- paths: + - /var/log/log1 + - /var/log/log2 +- paths: + - /var/log/log1 + - /var/log/log2 +`) + + require.True(t, bytes.Equal(expected, b)) +} + +func yamlComparer(expected, candidate interface{}) bool { + expectedYAML, err := yaml.Marshal(&expected) + if err != nil { + return false + } + + candidateYAML, err := yaml.Marshal(&candidate) + if err != nil { + return false + } + + return bytes.Equal(expectedYAML, candidateYAML) +} + +func TestMarshalerToJSON(t *testing.T) { + ast := &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + } + + b, err := json.Marshal(ast) + require.NoError(t, err) + + expected := []byte(`{"inputs":[{"paths":["/var/log/log1","/var/log/log2"]},{"paths":["/var/log/log1","/var/log/log2"]}]}`) + require.True(t, bytes.Equal(expected, b)) +} + +func TestASTToMapStr(t *testing.T) { + ast := &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + } + + m, err := ast.Map() + require.NoError(t, err) + + expected := map[string]interface{}{ + "inputs": []interface{}{ + map[string]interface{}{ + "paths": []interface{}{"/var/log/log1", "/var/log/log2"}, + }, + map[string]interface{}{ + "paths": []interface{}{"/var/log/log1", "/var/log/log2"}, + }, + }, + } + + assert.True(t, reflect.DeepEqual(m, expected)) +} + +func TestHash(t *testing.T) { + tests := map[string]struct { + c1 *AST + c2 *AST 
+ match bool + }{ + "same ast must match": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + }, + match: true, + }, + "slice reordering doesn't match": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log2"}, + &StrVal{value: "/var/log/log1"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "inputs", value: &List{ + value: []Node{ + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + &Dict{ + value: []Node{ + &Key{name: "paths", value: &List{value: []Node{ + &StrVal{value: "/var/log/log1"}, + &StrVal{value: "/var/log/log2"}, + }}}, + }, + }, + }, + }, + }, + }, + }, + }, + match: false, + }, + "match with int / float / bool": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "integer", value: &IntVal{value: 1}}, + &Key{name: "float", value: &FloatVal{value: 1.1234}}, + &Key{name: "bool1", value: &BoolVal{value: true}}, + &Key{name: "bool2", value: &BoolVal{value: false}}, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "integer", value: &IntVal{value: 1}}, + &Key{name: "float", value: &FloatVal{value: 1.1234}}, + &Key{name: "bool1", value: &BoolVal{value: true}}, + &Key{name: "bool2", value: &BoolVal{value: false}}, + }, + }, + }, + match: true, + }, + "different bool don't match": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &BoolVal{value: true}}, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &BoolVal{value: false}}, + }, + }, + }, + match: false, + }, + "different integer don't match": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &IntVal{value: 1}}, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &IntVal{value: 2}}, + }, + }, + }, + match: false, + }, + "different float don't match": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &FloatVal{value: 1.0}}, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &FloatVal{value: 2.0}}, + }, + }, + }, + match: false, + }, + "different floats representing the same value match": { + c1: &AST{ + root: &Dict{ + value: []Node{ + &Key{name: "v", value: &IntVal{value: 1}}, + }, + }, + }, + c2: &AST{ + root: &Dict{ + value: 
[]Node{
+						&Key{name: "v", value: &FloatVal{value: 1.0}},
+					},
+				},
+			},
+			match: true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			assert.Equal(t, test.match, test.c1.Equal(test.c2))
+		})
+
+		t.Run(name+" hash string", func(t *testing.T) {
+			assert.Equal(t, test.match, test.c1.HashStr() == test.c2.HashStr())
+		})
+	}
+}
diff --git a/x-pack/agent/pkg/agent/transpiler/map_visitor.go b/x-pack/agent/pkg/agent/transpiler/map_visitor.go
new file mode 100644
index 00000000000..556a775b617
--- /dev/null
+++ b/x-pack/agent/pkg/agent/transpiler/map_visitor.go
@@ -0,0 +1,93 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package transpiler
+
+// MapVisitor visits the Tree and produces a map[string]interface{} that can
+// be serialized into a YAML document.
+type MapVisitor struct {
+	Content interface{}
+}
+
+// OnStr is called when we visit a StrVal.
+func (m *MapVisitor) OnStr(v string) {
+	m.Content = v
+}
+
+// OnInt is called when we visit an IntVal.
+func (m *MapVisitor) OnInt(v int) {
+	m.Content = v
+}
+
+// OnUInt is called when we visit a UIntVal.
+func (m *MapVisitor) OnUInt(v uint64) {
+	m.Content = v
+}
+
+// OnFloat is called when we visit a FloatVal.
+func (m *MapVisitor) OnFloat(v float64) {
+	m.Content = v
+}
+
+// OnBool is called when we visit a BoolVal.
+func (m *MapVisitor) OnBool(v bool) {
+	m.Content = v
+}
+
+// OnDict is called when we visit a Dict and returns a VisitorDict.
+func (m *MapVisitor) OnDict() VisitorDict {
+	newMap := make(map[string]interface{})
+	m.Content = newMap
+	return &MapVisitorDict{Content: newMap}
+}
+
+// OnList is called when we visit a List and returns a VisitorList.
+func (m *MapVisitor) OnList() VisitorList {
+	m.Content = make([]interface{}, 0)
+	return &MapVisitorList{MapVisitor: m}
+}
+
+// MapVisitorDict is the visitor used when visiting a Dict.
+type MapVisitorDict struct {
+	Content        map[string]interface{}
+	lastVisitedKey string
+}
+
+// OnKey is called when we visit a key of a Dict.
+func (m *MapVisitorDict) OnKey(s string) {
+	m.lastVisitedKey = s
+}
+
+// OnValue is called when we visit a value of a Dict.
+func (m *MapVisitorDict) OnValue(v Visitor) {
+	visitor := v.(*MapVisitor)
+	m.Content[m.lastVisitedKey] = visitor.Content
+}
+
+// Visitor returns a new MapVisitor.
+func (m *MapVisitorDict) Visitor() Visitor {
+	return &MapVisitor{}
+}
+
+// OnComplete is called when we are done visiting the current Dict.
+func (m *MapVisitorDict) OnComplete() {}
+
+// MapVisitorList is the visitor used when visiting a List.
+type MapVisitorList struct {
+	MapVisitor *MapVisitor
+}
+
+// OnComplete is called when we finish visiting a List.
+func (m *MapVisitorList) OnComplete() {}
+
+// OnValue is called when we visit a value of a List.
+func (m *MapVisitorList) OnValue(v Visitor) {
+	visitor := v.(*MapVisitor)
+	m.MapVisitor.Content = append(m.MapVisitor.Content.([]interface{}), visitor.Content)
+}
+
+// Visitor returns a new MapVisitor.
+func (m *MapVisitorList) Visitor() Visitor {
+	return &MapVisitor{}
+}
diff --git a/x-pack/agent/pkg/agent/transpiler/rules.go b/x-pack/agent/pkg/agent/transpiler/rules.go
new file mode 100644
index 00000000000..2f4ceba7de4
--- /dev/null
+++ b/x-pack/agent/pkg/agent/transpiler/rules.go
@@ -0,0 +1,684 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package transpiler
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// RuleList is a container that allows the same tree to be run through
+// multiple defined Rules.
+type RuleList struct {
+	Rules []Rule
+}
+
+// Rule defines a rule that can be applied to the Tree.
+type Rule interface {
+	Apply(*AST) error
+}
+
+// Apply applies the list of rules to the same tree, using the result of the
+// previous rule as the input of the next one; it returns early if any rule
+// raises an error during execution.
+func (r *RuleList) Apply(ast *AST) error {
+	var err error
+	for _, rule := range r.Rules {
+		err = rule.Apply(ast)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalYAML marshals a rule list to YAML.
+func (r *RuleList) MarshalYAML() (interface{}, error) {
+	doc := make([]map[string]Rule, 0, len(r.Rules))
+
+	for _, rule := range r.Rules {
+		var name string
+		switch rule.(type) {
+		case *CopyRule:
+			name = "copy"
+		case *RenameRule:
+			name = "rename"
+		case *TranslateRule:
+			name = "translate"
+		case *TranslateWithRegexpRule:
+			name = "translate_with_regexp"
+		case *MapRule:
+			name = "map"
+		case *FilterRule:
+			name = "filter"
+		case *FilterValuesRule:
+			name = "filter_values"
+		case *FilterValuesWithRegexpRule:
+			name = "filter_values_with_regexp"
+		case *ExtractListItemRule:
+			name = "extract_list_items"
+		default:
+			return nil, fmt.Errorf("unknown rule of type %T", rule)
+		}
+
+		subdoc := map[string]Rule{
+			name: rule,
+		}
+
+		doc = append(doc, subdoc)
+	}
+	return doc, nil
+}
+
+// UnmarshalYAML unmarshals a YAML document into a RuleList.
+func (r *RuleList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var unpackTo []map[string]interface{}
+
+	err := unmarshal(&unpackTo)
+	if err != nil {
+		return err
+	}
+
+	// NOTE(ph): this is a bit of a hack because I want to make sure
+	// the unpack strategy stays in the struct implementation and yaml
+	// doesn't have a RawMessage similar to the JSON package, so partial unpack
+	// is not possible.
+	unpack := func(in interface{}, out interface{}) error {
+		b, err := yaml.Marshal(in)
+		if err != nil {
+			return err
+		}
+		return yaml.Unmarshal(b, out)
+	}
+
+	var rules []Rule
+
+	for _, m := range unpackTo {
+		ks := keys(m)
+		if len(ks) > 1 {
+			return fmt.Errorf("unknown rule identifier, expecting one identifier and received %d", len(ks))
+		}
+
+		name := ks[0]
+		fields := m[name]
+
+		var r Rule
+		switch name {
+		case "copy":
+			r = &CopyRule{}
+		case "rename":
+			r = &RenameRule{}
+		case "translate":
+			r = &TranslateRule{}
+		case "translate_with_regexp":
+			r = &TranslateWithRegexpRule{}
+		case "map":
+			r = &MapRule{}
+		case "filter":
+			r = &FilterRule{}
+		case "filter_values":
+			r = &FilterValuesRule{}
+		case "filter_values_with_regexp":
+			r = &FilterValuesWithRegexpRule{}
+		case "extract_list_items":
+			r = &ExtractListItemRule{}
+		default:
+			return fmt.Errorf("unknown rule of type %s", name)
+		}
+
+		if err := unpack(fields, r); err != nil {
+			return err
+		}
+
+		rules = append(rules, r)
+	}
+	r.Rules = rules
+	return nil
+}
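+
+// ruleListExample is an illustrative sketch added for documentation purposes
+// only, not part of the original change: rules run in order and each one
+// mutates the tree in place. The selectors used here are hypothetical.
+func ruleListExample(ast *AST) error {
+	rules := NewRuleList(
+		Rename("output.elasticsearch", "what"),
+		Filter("output", "inputs"),
+	)
+	return rules.Apply(ast)
+}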
+
+// ExtractListItemRule extracts items with a specified name out of a list of
+// maps and stores the result in a new array.
+// Example:
+// Source: {items: []List{ map{"key": "val1"}, map{"key": "val2"} } }
+// extract-list-item -path:items -item:key -to:keys
+// Result:
+// {items: []List{ map{"key": "val1"}, map{"key": "val2"} }, keys: []List {"val1", "val2"} }
type ExtractListItemRule struct {
+	Path Selector
+	Item string
+	To   string
+}
+
+// Apply extracts the items from the array.
+func (r *ExtractListItemRule) Apply(ast *AST) error {
+	node, found := Lookup(ast, r.Path)
+	if !found {
+		return nil
+	}
+
+	nodeVal := node.Value()
+	if nodeVal == nil {
+		return nil
+	}
+
+	l, isList := nodeVal.(*List)
+	if !isList {
+		return nil
+	}
+
+	newList := &List{
+		value: make([]Node, 0, len(l.value)),
+	}
+
+	for _, n := range l.value {
+		in, found := n.Find(r.Item)
+		if !found {
+			continue
+		}
+
+		vn, ok := in.Value().(Node)
+		if !ok {
+			continue
+		}
+
+		newList.value = append(newList.value, vn.Clone())
+	}
+
+	return Insert(ast, newList, r.To)
+}
+
+// ExtractListItem creates an ExtractListItemRule.
+func ExtractListItem(path Selector, item, target string) *ExtractListItemRule {
+	return &ExtractListItemRule{
+		Path: path,
+		Item: item,
+		To:   target,
+	}
+}
+
+// RenameRule takes a selector and renames the last path element of the
+// Selector to a new name.
+type RenameRule struct {
+	From Selector
+	To   string
+}
+
+// Apply renames the last element of the Selector to the new name, keeps all
+// the other values intact, and returns an error on failure.
+func (r *RenameRule) Apply(ast *AST) error {
+	// Skip rename when the node is not found.
+	node, ok := Lookup(ast, r.From)
+	if !ok {
+		return nil
+	}
+
+	n, ok := node.(*Key)
+	if !ok {
+		return fmt.Errorf("cannot rename, invalid type expected 'Key' received '%T'", node)
+	}
+	n.name = r.To
+	return nil
+}
+
+// Rename creates a rename rule.
+func Rename(from Selector, to string) *RenameRule {
+	return &RenameRule{From: from, To: to}
+}
+
+// CopyRule takes a source Selector and a destination Selector and inserts the
+// existing node at the destination; it returns an error if the types are
+// incompatible.
+type CopyRule struct {
+	From Selector
+	To   Selector
+}
+
+// Copy creates a copy rule.
+func Copy(from, to Selector) *CopyRule {
+	return &CopyRule{From: from, To: to}
+}
+
+// Apply copies a part of the tree into a new destination.
+func (r CopyRule) Apply(ast *AST) error {
+	node, ok := Lookup(ast, r.From)
+	// Skip when the `from` node is not found.
+	if !ok {
+		return nil
+	}
+
+	if err := Insert(ast, node, r.To); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// TranslateRule takes a selector and replaces any values that match the
+// translation table.
+type TranslateRule struct {
+	Path   Selector
+	Mapper map[string]interface{}
+}
+
+// Translate creates a translation rule.
+func Translate(path Selector, mapper map[string]interface{}) *TranslateRule {
+	return &TranslateRule{Path: path, Mapper: mapper}
+}
+
+// Apply translates the value at the selector through the translation table.
+func (r *TranslateRule) Apply(ast *AST) error {
+	// Skip translate when the node is not found.
+	node, ok := Lookup(ast, r.Path)
+	if !ok {
+		return nil
+	}
+
+	n, ok := node.(*Key)
+	if !ok {
+		return fmt.Errorf("cannot translate, invalid type expected 'Key' received '%T'", node)
+	}
+
+	for k, v := range r.Mapper {
+		if k == n.Value().(Node).Value() {
+			val := reflect.ValueOf(v)
+			nodeVal, err := load(val)
+			if err != nil {
+				return err
+			}
+			n.value = nodeVal
+		}
+	}
+
+	return nil
+}
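+
+// translateExample is an illustrative sketch added for documentation purposes
+// only, not part of the original change: Translate rewrites the value at a
+// selector through a lookup table and leaves unmatched values untouched. The
+// selector and table values are hypothetical (mirroring the tests).
+func translateExample(ast *AST) error {
+	return Translate("name", map[string]interface{}{
+		"hello":    "bonjour",
+		"aurevoir": "a bientot",
+	}).Apply(ast)
+}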
+
+// TranslateWithRegexpRule takes a selector and replaces matching parts of its
+// value using a regular expression.
+type TranslateWithRegexpRule struct {
+	Path Selector
+	Re   *regexp.Regexp
+	With string
+}
+
+// MarshalYAML marshals a TranslateWithRegexpRule into a YAML document.
+func (r *TranslateWithRegexpRule) MarshalYAML() (interface{}, error) {
+	return map[string]interface{}{
+		"path": r.Path,
+		"re":   r.Re.String(),
+		"with": r.With,
+	}, nil
+}
+
+// UnmarshalYAML unmarshals a YAML document into a TranslateWithRegexpRule.
+func (r *TranslateWithRegexpRule) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	tmp := struct {
+		Path string
+		Re   string
+		With string
+	}{}
+
+	if err := unmarshal(&tmp); err != nil {
+		return errors.New(err, "cannot unmarshal into a TranslateWithRegexpRule")
+	}
+
+	re, err := regexp.Compile(tmp.Re)
+	if err != nil {
+		return errors.New(err, "invalid regular expression for TranslateWithRegexpRule")
+	}
+
+	*r = TranslateWithRegexpRule{
+		Path: tmp.Path,
+		Re:   re,
+		With: tmp.With,
+	}
+	return nil
+}
+
+// TranslateWithRegexp creates a regular-expression translation rule.
+func TranslateWithRegexp(path Selector, re *regexp.Regexp, with string) *TranslateWithRegexpRule {
+	return &TranslateWithRegexpRule{Path: path, Re: re, With: with}
+}
+
+// Apply replaces the value at the selector using the regular expression.
+func (r *TranslateWithRegexpRule) Apply(ast *AST) error {
+	// Skip translate when the node is not found.
+	node, ok := Lookup(ast, r.Path)
+	if !ok {
+		return nil
+	}
+
+	n, ok := node.(*Key)
+	if !ok {
+		return fmt.Errorf("cannot translate, invalid type expected 'Key' received '%T'", node)
+	}
+
+	candidate, ok := n.value.(Node).Value().(string)
+	if !ok {
+		return fmt.Errorf("cannot translate, expected a value of type 'string' and received %T", n.value.(Node).Value())
+	}
+
+	s := r.Re.ReplaceAllString(candidate, r.With)
+	val := reflect.ValueOf(s)
+	nodeVal, err := load(val)
+	if err != nil {
+		return err
+	}
+
+	n.value = nodeVal
+
+	return nil
+}
+
+// MapRule allows applying multiple rules on a subset of a Tree based on a
+// provided selector.
+type MapRule struct {
+	Path  Selector
+	Rules []Rule
+}
+
+// Map creates a new map rule.
+func Map(path Selector, rules ...Rule) *MapRule {
+	return &MapRule{Path: path, Rules: rules}
+}
+
+// Apply applies multiple rules to every element of the list found at the
+// selector.
+func (r *MapRule) Apply(ast *AST) error {
+	node, ok := Lookup(ast, r.Path)
+	// Skip map when the node is not found.
+	if !ok {
+		return nil
+	}
+
+	n, ok := node.(*Key)
+	if !ok {
+		return fmt.Errorf(
+			"cannot iterate over node, invalid type expected 'Key' received '%T'",
+			node,
+		)
+	}
+
+	l, ok := n.Value().(*List)
+	if !ok {
+		return fmt.Errorf(
+			"cannot iterate over node, invalid type expected 'List' received '%T'",
+			node,
+		)
+	}
+
+	values := l.Value().([]Node)
+
+	for idx, item := range values {
+		newAST := &AST{root: item}
+		for _, rule := range r.Rules {
+			err := rule.Apply(newAST)
+			if err != nil {
+				return err
+			}
+			values[idx] = newAST.root
+		}
+	}
+	return nil
+}
+
+// MarshalYAML marshals a MapRule into a YAML document.
+func (r *MapRule) MarshalYAML() (interface{}, error) {
+	rules, err := NewRuleList(r.Rules...).MarshalYAML()
+	if err != nil {
+		return nil, err
+	}
+
+	return map[string]interface{}{
+		"path":  r.Path,
+		"rules": rules,
+	}, nil
+}
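+
+// mapRuleExample is an illustrative sketch added for documentation purposes
+// only, not part of the original change: Map applies nested rules to every
+// element of the list found at the selector, here stripping the "metric/"
+// prefix from each element's "type" (mirroring the behaviour exercised in
+// the tests). The selectors are hypothetical.
+func mapRuleExample(ast *AST) error {
+	return Map("inputs",
+		TranslateWithRegexp("type", regexp.MustCompile("^metric/(.*)"), "$1"),
+	).Apply(ast)
+}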
+
+// UnmarshalYAML unmarshals a YAML document into a MapRule.
+func (r *MapRule) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	tmp := struct {
+		Path  string
+		Rules RuleList
+	}{}
+
+	if err := unmarshal(&tmp); err != nil {
+		return errors.New(err, "cannot unmarshal into a MapRule")
+	}
+
+	*r = MapRule{
+		Path:  tmp.Path,
+		Rules: tmp.Rules.Rules,
+	}
+	return nil
+}
+
+// FilterRule filters the tree and keeps only the nodes matching the given
+// selectors.
+type FilterRule struct {
+	Selectors []Selector
+}
+
+// Filter returns a new FilterRule.
+func Filter(selectors ...Selector) *FilterRule {
+	return &FilterRule{Selectors: selectors}
+}
+
+// Apply filters the Tree based on the list of selectors.
+func (r *FilterRule) Apply(ast *AST) error {
+	mergedAST := &AST{root: &Dict{}}
+	var err error
+	for _, selector := range r.Selectors {
+		newAST, ok := Select(ast.Clone(), selector)
+		if !ok {
+			continue
+		}
+		mergedAST, err = Combine(mergedAST, newAST)
+		if err != nil {
+			return err
+		}
+	}
+	ast.root = mergedAST.root
+	return nil
+}
+
+// FilterValuesRule filters a list in the tree and keeps only the elements
+// whose key matches one of the predefined values.
+type FilterValuesRule struct {
+	Selector Selector
+	Key      Selector
+	Values   []interface{}
+}
+
+// FilterValues returns a new FilterValuesRule.
+func FilterValues(selector Selector, key Selector, values ...interface{}) *FilterValuesRule {
+	return &FilterValuesRule{Selector: selector, Key: key, Values: values}
+}
+
+// Apply filters the Tree based on the list of values.
+func (r *FilterValuesRule) Apply(ast *AST) error {
+	node, ok := Lookup(ast, r.Selector)
+	// Skip filtering when the node is not found.
+	if !ok {
+		return nil
+	}
+
+	n, ok := node.(*Key)
+	if !ok {
+		return fmt.Errorf(
+			"cannot iterate over node, invalid type expected 'Key' received '%T'",
+			node,
+		)
+	}
+
+	l, ok := n.Value().(*List)
+	if !ok {
+		return fmt.Errorf(
+			"cannot iterate over node, invalid type expected 'List' received '%T'",
+			node,
+		)
+	}
+
+	values := l.Value().([]Node)
+	var newNodes []Node
+
+	for idx := 0; idx < len(values); idx++ {
+		item := values[idx]
+		newRoot := &AST{root: item}
+
+		newAST, ok := Lookup(newRoot, r.Key)
+		if !ok {
+			newNodes = append(newNodes, item)
+			continue
+		}
+
+		// Filter values.
+		n, ok := newAST.(*Key)
+		if !ok {
+			return fmt.Errorf("cannot filter on value, invalid type expected 'Key' received '%T'", newAST)
+		}
+
+		if n.name != r.Key {
+			newNodes = append(newNodes, item)
+			continue
+		}
+
+		for _, v := range r.Values {
+			if v == n.value.(Node).Value() {
+				newNodes = append(newNodes, item)
+				break
+			}
+		}
+	}
+
+	l.value = newNodes
+	n.value = l
+	return nil
+}
+
+// FilterValuesWithRegexpRule filters a list in the tree and keeps only the
+// elements whose key matches a regular expression.
+type FilterValuesWithRegexpRule struct {
+	Selector Selector
+	Key      Selector
+	Re       *regexp.Regexp
+}
+
+// FilterValuesWithRegexp returns a new FilterValuesWithRegexpRule.
+func FilterValuesWithRegexp(
+	selector Selector,
+	key Selector,
+	re *regexp.Regexp,
+) *FilterValuesWithRegexpRule {
+	return &FilterValuesWithRegexpRule{Selector: selector, Key: key, Re: re}
+}
+
+// MarshalYAML marshals a FilterValuesWithRegexpRule into a YAML document.
+func (r *FilterValuesWithRegexpRule) MarshalYAML() (interface{}, error) {
+	return map[string]interface{}{
+		"selector": r.Selector,
+		"key":      r.Key,
+		"re":       r.Re.String(),
+	}, nil
+}
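+
+// filterValuesExample is an illustrative sketch added for documentation
+// purposes only, not part of the original change: FilterValues keeps only the
+// elements of the "inputs" list whose "type" key matches one of the allowed
+// values. The selector and values are hypothetical (mirroring the tests).
+func filterValuesExample(ast *AST) error {
+	return FilterValues("inputs", "type", "log", "tcp").Apply(ast)
+}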
+
+// UnmarshalYAML unmarshals a YAML document into a FilterValuesWithRegexpRule.
+func (r *FilterValuesWithRegexpRule) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	tmp := struct {
+		Selector string
+		Key      string
+		Re       string
+	}{}
+
+	if err := unmarshal(&tmp); err != nil {
+		return errors.New(err, "cannot unmarshal into a FilterValuesWithRegexpRule")
+	}
+
+	re, err := regexp.Compile(tmp.Re)
+	if err != nil {
+		return errors.New(err, "invalid regular expression for FilterValuesWithRegexpRule")
+	}
+	*r = FilterValuesWithRegexpRule{
+		Selector: tmp.Selector,
+		Key:      tmp.Key,
+		Re:       re,
+	}
+
+	return nil
+}
+
+// Apply filters the Tree based on the regular expression.
+func (r *FilterValuesWithRegexpRule) Apply(ast *AST) error {
+	node, ok := Lookup(ast, r.Selector)
+	// Skip filtering when the node is not found.
+	if !ok {
+		return nil
+	}
+
+	n, ok := node.(*Key)
+	if !ok {
+		return fmt.Errorf(
+			"cannot iterate over node, invalid type expected 'Key' received '%T'",
+			node,
+		)
+	}
+
+	l, ok := n.Value().(*List)
+	if !ok {
+		return fmt.Errorf(
+			"cannot iterate over node, invalid type expected 'List' received '%T'",
+			node,
+		)
+	}
+
+	values := l.Value().([]Node)
+	var newNodes []Node
+
+	for idx := 0; idx < len(values); idx++ {
+		item := values[idx]
+		newRoot := &AST{root: item}
+
+		newAST, ok := Lookup(newRoot, r.Key)
+		if !ok {
+			newNodes = append(newNodes, item)
+			continue
+		}
+
+		// Filter values.
+		n, ok := newAST.(*Key)
+		if !ok {
+			return fmt.Errorf("cannot filter on value, invalid type expected 'Key' received '%T'", newAST)
+		}
+
+		if n.name != r.Key {
+			newNodes = append(newNodes, item)
+			continue
+		}
+
+		candidate, ok := n.value.(Node).Value().(string)
+		if !ok {
+			return fmt.Errorf("cannot filter on value, expected 'string' and received %T", n.value.(Node).Value())
+		}
+
+		if r.Re.MatchString(candidate) {
+			newNodes = append(newNodes, item)
+		}
+	}
+
+	l.value = newNodes
+	n.value = l
+	return nil
+}
+
+// NewRuleList returns a new list of rules to be executed.
+func NewRuleList(rules ...Rule) *RuleList {
+	return &RuleList{Rules: rules}
+}
+
+// keys returns the top-level keys of the map.
+func keys(m map[string]interface{}) []string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	return keys
+}
diff --git a/x-pack/agent/pkg/agent/transpiler/rules_test.go b/x-pack/agent/pkg/agent/transpiler/rules_test.go
new file mode 100644
index 00000000000..5cfc3048ac4
--- /dev/null
+++ b/x-pack/agent/pkg/agent/transpiler/rules_test.go
@@ -0,0 +1,501 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package transpiler + +import ( + "regexp" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/internal/yamltest" +) + +func TestRules(t *testing.T) { + testcases := map[string]struct { + givenYAML string + expectedYAML string + rule Rule + }{ + + "extract items from array": { + givenYAML: ` +streams: + - name: MySQL error log + input: + type: file + path: /var/log/mysql/error.log + - name: MySQL access log + input: + type: file + path: /var/log/mysql/access.log + - name: MySQL metrics + input: + type: mysql + host: localhost + port: 3306 +`, + expectedYAML: ` +streams: + - name: MySQL error log + input: + type: file + path: /var/log/mysql/error.log + - name: MySQL access log + input: + type: file + path: /var/log/mysql/access.log + - name: MySQL metrics + input: + type: mysql + host: localhost + port: 3306 +inputs: + - type: file + path: /var/log/mysql/error.log + - type: file + path: /var/log/mysql/access.log + - type: mysql + host: localhost + port: 3306 +`, + rule: &RuleList{ + Rules: []Rule{ + ExtractListItem("streams", "input", "inputs"), + }, + }, + }, + "two level rename": { + givenYAML: ` +output: + elasticsearch: + hosts: + - "127.0.0.1:9201" + - "127.0.0.1:9202" + logstash: + port: 5 +`, + expectedYAML: ` +output: + what: + hosts: + - "127.0.0.1:9201" + - "127.0.0.1:9202" + logstash: + port: 5 +`, + rule: &RuleList{ + Rules: []Rule{ + Rename("output.elasticsearch", "what"), + }, + }, + }, + "rename non existing key": { + givenYAML: ` +output: + elasticsearch: + hosts: + - "127.0.0.1:9201" + - "127.0.0.1:9202" + logstash: + port: 5 +`, + expectedYAML: ` +output: + elasticsearch: + hosts: + - "127.0.0.1:9201" + - "127.0.0.1:9202" + logstash: + port: 5 +`, + rule: &RuleList{ + Rules: []Rule{ + Rename("donoexist", "what"), + }, + }, + }, + "copy top level slice": { + givenYAML: ` +inputs: + - type: event/file + - type: metric/docker +`, + expectedYAML: ` +inputs: + - type: event/file + - type: metric/docker +filebeat: + inputs: + - type: event/file + - type: metric/docker +`, + rule: &RuleList{ + Rules: []Rule{ + Copy("inputs", "filebeat"), + }, + }, + }, + "copy keep ordering for filtering": { + givenYAML: ` +inputs: + - type: event/file + - type: metric/docker +`, + expectedYAML: ` +filebeat: + inputs: + - type: event/file + - type: metric/docker +`, + rule: &RuleList{ + Rules: []Rule{ + Copy("inputs", "filebeat"), + Filter("filebeat"), + }, + }, + }, + "copy non existing key": { + givenYAML: ` +inputs: + - type: event/file + - type: metric/docker +`, + expectedYAML: ` +inputs: + - type: event/file + - type: metric/docker +`, + rule: &RuleList{ + Rules: []Rule{ + Copy("what-inputs", "filebeat"), + }, + }, + }, + "translate key values to another value": { + givenYAML: ` +name: "hello" +`, + expectedYAML: ` +name: "bonjour" +`, + rule: &RuleList{ + Rules: []Rule{ + Translate("name", map[string]interface{}{ + "aurevoir": "a bientot", + "hello": "bonjour", + }), + }, + }, + }, + "translate on non existing key": { + givenYAML: ` +name: "hello" +`, + expectedYAML: ` +name: "hello" +`, + rule: &RuleList{ + Rules: []Rule{ + Translate("donotexist", map[string]interface{}{ + "aurevoir": "a bientot", + "hello": "bonjour", + }), + }, + }, + }, + "translate 1 level deep key values to another value": { + givenYAML: ` +input: + type: "aurevoir" +`, + expectedYAML: ` +input: + type: "a bientot" +`, + rule: &RuleList{ + Rules: []Rule{ + 
Translate("input.type", map[string]interface{}{ + "aurevoir": "a bientot", + "hello": "bonjour", + }), + }, + }, + }, + "map operation on array": { + givenYAML: ` +inputs: + - type: event/file + - type: log/docker +`, + expectedYAML: ` +inputs: + - type: log + - type: docker +`, + rule: &RuleList{ + Rules: []Rule{ + Map("inputs", + Translate("type", map[string]interface{}{ + "event/file": "log", + "log/docker": "docker", + })), + }, + }, + }, + "map operation on non existing": { + givenYAML: ` +inputs: + - type: event/file + - type: log/docker +`, + expectedYAML: ` +inputs: + - type: event/file + - type: log/docker +`, + rule: &RuleList{ + Rules: []Rule{ + Map("no-inputs", + Translate("type", map[string]interface{}{ + "event/file": "log", + "log/docker": "docker", + })), + }, + }, + }, + "single selector on top level keys": { + givenYAML: ` +inputs: + - type: event/file +output: + logstash: + port: 5 +`, + expectedYAML: ` +output: + logstash: + port: 5 +`, + rule: &RuleList{ + Rules: []Rule{ + Filter("output"), + }, + }, + }, + "multiple selectors on top level keys": { + givenYAML: ` +inputs: + - type: event/file +filebeat: + - type: docker +output: + logstash: + port: 5 +`, + expectedYAML: ` +inputs: + - type: event/file +output: + logstash: + port: 5 +`, + rule: &RuleList{ + Rules: []Rule{ + Filter("output", "inputs"), + }, + }, + }, + "filter for non existing keys": { + givenYAML: ` +inputs: + - type: event/file +filebeat: + - type: docker +output: + logstash: + port: 5 +`, + expectedYAML: ``, + rule: &RuleList{ + Rules: []Rule{ + Filter("no-output", "no-inputs"), + }, + }, + }, + + "filter for values": { + givenYAML: ` +inputs: + - type: log + - type: tcp + - type: udp +`, + expectedYAML: ` +inputs: + - type: log + - type: tcp +`, + rule: &RuleList{ + Rules: []Rule{ + FilterValues("inputs", "type", "log", "tcp"), + }, + }, + }, + "filter for regexp": { + givenYAML: ` +inputs: + - type: metric/log + - type: metric/tcp + - type: udp + - type: unknown +`, + expectedYAML: ` +inputs: + - type: metric/log + - type: metric/tcp +`, + rule: &RuleList{ + Rules: []Rule{ + FilterValuesWithRegexp("inputs", "type", regexp.MustCompile("^metric/.*")), + }, + }, + }, + "translate with regexp": { + givenYAML: ` +inputs: + - type: metric/log + - type: metric/tcp +`, + expectedYAML: ` +inputs: + - type: log + - type: tcp +`, + rule: &RuleList{ + Rules: []Rule{ + Map("inputs", TranslateWithRegexp("type", regexp.MustCompile("^metric/(.*)"), "$1")), + }, + }, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + a, err := makeASTFromYAML(test.givenYAML) + require.NoError(t, err) + + err = test.rule.Apply(a) + require.NoError(t, err) + + v := &MapVisitor{} + a.Accept(v) + + var m map[string]interface{} + if len(test.expectedYAML) == 0 { + m = make(map[string]interface{}) + } else { + err := yamltest.FromYAML([]byte(test.expectedYAML), &m) + require.NoError(t, err) + } + + if !assert.True(t, cmp.Equal(v.Content, m)) { + diff := cmp.Diff(v.Content, m) + if diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + } + }) + } +} + +func makeASTFromYAML(yamlStr string) (*AST, error) { + var m map[string]interface{} + if err := yaml.Unmarshal([]byte(yamlStr), &m); err != nil { + return nil, err + } + + return NewAST(m) +} + +func TestSerialization(t *testing.T) { + value := NewRuleList( + Rename("from-value", "to-value"), + Copy("from-value", "to-value"), + Translate("path-value", map[string]interface{}{ + "key-v-1": "value-v-1", + "key-v-2": "value-v-2", + }), + 
TranslateWithRegexp("path-value", regexp.MustCompile("^metric/(.+)"), "log/$1"), + Map("path-value", + Rename("from-value", "to-value"), + Copy("from-value", "to-value"), + ), + Filter("f1", "f2"), + FilterValues("select-v", "key-v", "v1", "v2"), + FilterValuesWithRegexp("inputs", "type", regexp.MustCompile("^metric/.*")), + ExtractListItem("path.p", "item", "target"), + ) + + y := `- rename: + from: from-value + to: to-value +- copy: + from: from-value + to: to-value +- translate: + path: path-value + mapper: + key-v-1: value-v-1 + key-v-2: value-v-2 +- translate_with_regexp: + path: path-value + re: ^metric/(.+) + with: log/$1 +- map: + path: path-value + rules: + - rename: + from: from-value + to: to-value + - copy: + from: from-value + to: to-value +- filter: + selectors: + - f1 + - f2 +- filter_values: + selector: select-v + key: key-v + values: + - v1 + - v2 +- filter_values_with_regexp: + key: type + re: ^metric/.* + selector: inputs +- extract_list_items: + path: path.p + item: item + to: target +` + + t.Run("serialize_rules", func(t *testing.T) { + b, err := yaml.Marshal(value) + require.NoError(t, err) + assert.Equal(t, string(b), y) + }) + + t.Run("unserialize_rules", func(t *testing.T) { + v := &RuleList{} + err := yaml.Unmarshal([]byte(y), v) + require.NoError(t, err) + assert.Equal(t, value, v) + }) +} diff --git a/x-pack/agent/pkg/agent/transpiler/visitor.go b/x-pack/agent/pkg/agent/transpiler/visitor.go new file mode 100644 index 00000000000..d527fa67e72 --- /dev/null +++ b/x-pack/agent/pkg/agent/transpiler/visitor.go @@ -0,0 +1,31 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package transpiler + +// Visitor defines the interface to use when visiting all the nodes in the Tree. +type Visitor interface { + OnDict() VisitorDict + OnList() VisitorList + OnStr(string) + OnInt(int) + OnUInt(uint64) + OnFloat(float64) + OnBool(bool) +} + +// VisitorDict to use when visiting a Dict. +type VisitorDict interface { + OnKey(string) + Visitor() Visitor + OnValue(Visitor) + OnComplete() +} + +// VisitorList to use when visiting a List. +type VisitorList interface { + OnValue(Visitor) + Visitor() Visitor + OnComplete() +} diff --git a/x-pack/agent/pkg/artifact/artifact.go b/x-pack/agent/pkg/artifact/artifact.go new file mode 100644 index 00000000000..1315497dc37 --- /dev/null +++ b/x-pack/agent/pkg/artifact/artifact.go @@ -0,0 +1,43 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package artifact
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+var packageArchMap = map[string]string{
+	"linux-binary-32":   "linux-x86.tar.gz",
+	"linux-binary-64":   "linux-x86_64.tar.gz",
+	"windows-binary-32": "windows-x86.zip",
+	"windows-binary-64": "windows-x86_64.zip",
+	"darwin-binary-32":  "darwin-x86_64.tar.gz",
+	"darwin-binary-64":  "darwin-x86_64.tar.gz",
+}
+
+// GetArtifactName constructs the file name of a downloaded artifact for a
+// program, version, operating system, and architecture.
+func GetArtifactName(program, version, operatingSystem, arch string) (string, error) {
+	key := fmt.Sprintf("%s-binary-%s", operatingSystem, arch)
+	suffix, found := packageArchMap[key]
+	if !found {
+		return "", errors.New(fmt.Sprintf("'%s' is not a valid combination for a package", key), errors.TypeConfig)
+	}
+
+	return fmt.Sprintf("%s-%s-%s", program, version, suffix), nil
+}
+
+// GetArtifactPath returns the full path of the artifact for a program in a
+// specific version.
+func GetArtifactPath(programName, version, operatingSystem, arch, targetDir string) (string, error) {
+	artifactName, err := GetArtifactName(programName, version, operatingSystem, arch)
+	if err != nil {
+		return "", err
+	}
+
+	fullPath := filepath.Join(targetDir, artifactName)
+	return fullPath, nil
+}
diff --git a/x-pack/agent/pkg/artifact/config.go b/x-pack/agent/pkg/artifact/config.go
new file mode 100644
index 00000000000..363cb50c499
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/config.go
@@ -0,0 +1,76 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package artifact
+
+import (
+	"runtime"
+	"strings"
+	"time"
+)
+
+// Config is the configuration used by both the verifier and the downloader.
+type Config struct {
+	// OperatingSystem: operating system [linux, windows, darwin]
+	OperatingSystem string `json:"-" config:",ignore"`
+
+	// Architecture: target architecture [32, 64]
+	Architecture string `json:"-" config:",ignore"`
+
+	// BeatsSourceURI: source of the artifacts, e.g. https://artifacts.elastic.co/downloads/beats/
+	BeatsSourceURI string `json:"sourceURI" config:"sourceURI"`
+
+	// TargetDirectory: path to the directory containing downloaded packages
+	TargetDirectory string `json:"targetDirectory" config:"target_directory"`
+
+	// Timeout: timeout for downloading a package
+	Timeout time.Duration `json:"timeout" config:"timeout"`
+
+	// PgpFile: filepath to a public key used for verifying downloaded artifacts;
+	// if no file is present the agent will try to load the public key from the
+	// elastic.co website.
+	PgpFile string `json:"pgpfile" config:"pgpfile"`
+
+	// InstallPath: path to the directory containing installed packages
+	InstallPath string `yaml:"installPath" config:"install_path"`
+
+	// DropPath: path where the agent can find installation files for download.
+	// The difference between this and TargetDirectory is that fetched packages
+	// (from the web or the filesystem) are stored in TargetDirectory, while
+	// DropPath specifies where the filesystem downloader looks for packages
+	// before placing them in TargetDirectory. This can be a local or a network
+	// disk. If not provided, the filesystem downloader falls back to the
+	// /beats subfolder of the agent directory.
+	DropPath string `yaml:"dropPath" config:"drop_path"`
+}
+
+// OS returns the configured operating system or falls back to runtime.GOOS.
+func (c *Config) OS() string {
+	if c.OperatingSystem != "" {
+		return c.OperatingSystem
+	}
+
+	switch runtime.GOOS {
+	case "windows":
+		c.OperatingSystem = "windows"
+	case "darwin":
+		c.OperatingSystem = "darwin"
+	default:
+		c.OperatingSystem = "linux"
+	}
+
+	return c.OperatingSystem
+}
+
+// Arch returns the configured architecture or falls back to 32-bit.
+func (c *Config) Arch() string {
+	if c.Architecture != "" {
+		return c.Architecture
+	}
+
+	arch := "32"
+	if strings.Contains(runtime.GOARCH, "64") {
+		arch = "64"
+	}
+
+	c.Architecture = arch
+	return c.Architecture
+}
diff --git a/x-pack/agent/pkg/artifact/download/composed/downloader.go b/x-pack/agent/pkg/artifact/download/composed/downloader.go
new file mode 100644
index 00000000000..b8132a689b0
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/composed/downloader.go
@@ -0,0 +1,48 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package composed
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-multierror"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download"
+)
+
+// Downloader wraps a predefined set of downloaders. Each download call tries
+// the first one and, on failure, falls back to the next one. An error is
+// returned only if all of them fail.
+type Downloader struct {
+	dd []download.Downloader
+}
+
+// NewDownloader creates a downloader out of a predefined set of downloaders.
+// Each download call tries the first one and, on failure, falls back to the
+// next one. An error is returned only if all of them fail.
+func NewDownloader(downloaders ...download.Downloader) *Downloader {
+	return &Downloader{
+		dd: downloaders,
+	}
+}
+
+// Download fetches the package from the configured sources in order.
+// Returns the absolute path to the downloaded package, or the accumulated
+// errors of every downloader.
+func (e *Downloader) Download(ctx context.Context, programName, version string) (string, error) {
+	var err error
+
+	for _, d := range e.dd {
+		s, dErr := d.Download(ctx, programName, version)
+		if dErr == nil {
+			return s, nil
+		}
+
+		err = multierror.Append(err, dErr)
+	}
+
+	return "", err
+}
diff --git a/x-pack/agent/pkg/artifact/download/composed/downloader_test.go b/x-pack/agent/pkg/artifact/download/composed/downloader_test.go
new file mode 100644
index 00000000000..472d8689ef3
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/composed/downloader_test.go
@@ -0,0 +1,78 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package composed
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download"
+)
+
+type FailingDownloader struct {
+	called bool
+}
+
+func (d *FailingDownloader) Download(ctx context.Context, a, b string) (string, error) {
+	d.called = true
+	return "", errors.New("failing")
+}
+
+func (d *FailingDownloader) Called() bool { return d.called }
+
+type SuccDownloader struct {
+	called bool
+}
+
+func (d *SuccDownloader) Download(ctx context.Context, a, b string) (string, error) {
+	d.called = true
+	return "succ", nil
+}
+func (d *SuccDownloader) Called() bool { return d.called }
+
+func TestComposed(t *testing.T) {
+	testCases := []testCase{
+		{
+			downloaders:    []CheckableDownloader{&FailingDownloader{}, &SuccDownloader{}},
+			checkFunc:      func(d []CheckableDownloader) bool { return d[0].Called() && d[1].Called() },
+			expectedResult: true,
+		}, {
+			downloaders:    []CheckableDownloader{&SuccDownloader{}, &SuccDownloader{}},
+			checkFunc:      func(d []CheckableDownloader) bool { return d[0].Called() && !d[1].Called() },
+			expectedResult: true,
+		}, {
+			downloaders:    []CheckableDownloader{&SuccDownloader{}, &FailingDownloader{}},
+			checkFunc:      func(d []CheckableDownloader) bool { return d[0].Called() && !d[1].Called() },
+			expectedResult: true,
+		}, {
+			downloaders:    []CheckableDownloader{&FailingDownloader{}, &FailingDownloader{}},
+			checkFunc:      func(d []CheckableDownloader) bool { return d[0].Called() && d[1].Called() },
+			expectedResult: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		d := NewDownloader(tc.downloaders[0], tc.downloaders[1])
+		r, _ := d.Download(context.Background(), "a", "b")
+
+		assert.Equal(t, tc.expectedResult, r == "succ")
+
+		assert.True(t, tc.checkFunc(tc.downloaders))
+	}
+}
+
+type CheckableDownloader interface {
+	download.Downloader
+	Called() bool
+}
+
+type testCase struct {
+	downloaders    []CheckableDownloader
+	checkFunc      func(downloaders []CheckableDownloader) bool
+	expectedResult bool
+}
diff --git a/x-pack/agent/pkg/artifact/download/composed/verifier.go b/x-pack/agent/pkg/artifact/download/composed/verifier.go
new file mode 100644
index 00000000000..3c7bb5d81f3
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/composed/verifier.go
@@ -0,0 +1,45 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package composed
+
+import (
+	"github.com/hashicorp/go-multierror"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download"
+)
+
+// Verifier is a verifier composed of a predefined set of verifiers.
+// During each verify call it tries the first one and on failure falls back to
+// the next one.
+// An error is returned only if all of them fail.
+type Verifier struct {
+	vv []download.Verifier
+}
+
+// NewVerifier creates a verifier composed out of a predefined set of verifiers.
+// During each verify call it tries the first one and on failure falls back to
+// the next one.
+// An error is returned only if all of them fail.
+func NewVerifier(verifiers ...download.Verifier) *Verifier {
+	return &Verifier{
+		vv: verifiers,
+	}
+}
+
+// Verify checks the package using the configured verifiers in order.
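+//
+// For example (a sketch; fsVer and httpVer stand for any download.Verifier
+// implementations, such as the fs and http verifiers defined later in this patch):
+//
+//	v := NewVerifier(fsVer, httpVer)
+//	ok, err := v.Verify("filebeat", "7.5.1")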
+func (e *Verifier) Verify(programName, version string) (bool, error) {
+	var err error
+
+	for _, v := range e.vv {
+		b, verifyErr := v.Verify(programName, version)
+		if verifyErr == nil {
+			return b, nil
+		}
+
+		err = multierror.Append(err, verifyErr)
+	}
+
+	return false, err
+}
diff --git a/x-pack/agent/pkg/artifact/download/downloader.go b/x-pack/agent/pkg/artifact/download/downloader.go
new file mode 100644
index 00000000000..0b7010062e9
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/downloader.go
@@ -0,0 +1,12 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package download
+
+import "context"
+
+// Downloader is an interface allowing the download of an artifact.
+type Downloader interface {
+	Download(ctx context.Context, programName, version string) (string, error)
+}
diff --git a/x-pack/agent/pkg/artifact/download/fs/downloader.go b/x-pack/agent/pkg/artifact/download/fs/downloader.go
new file mode 100644
index 00000000000..6c208f0656b
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/fs/downloader.go
@@ -0,0 +1,96 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fs
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+)
+
+const (
+	packagePermissions = 0660
+	beatsSubfolder     = "beats"
+)
+
+// Downloader is a downloader able to fetch artifacts from a local or network
+// disk (the drop path) instead of the elastic.co website.
+type Downloader struct {
+	dropPath string
+	config   *artifact.Config
+}
+
+// NewDownloader creates and configures a filesystem Downloader.
+func NewDownloader(config *artifact.Config) *Downloader {
+	return &Downloader{
+		config:   config,
+		dropPath: getDropPath(config),
+	}
+}
+
+// Download fetches the package from the configured source.
+// Returns the absolute path to the downloaded package and an error.
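+//
+// A minimal usage sketch (paths are hypothetical):
+//
+//	d := NewDownloader(&artifact.Config{
+//		TargetDirectory: "/opt/agent/downloads",
+//		DropPath:        "/mnt/share/beats",
+//	})
+//	path, err := d.Download(context.Background(), "filebeat", "7.5.1")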
+func (e *Downloader) Download(_ context.Context, programName, version string) (string, error) {
+	// create a destination directory root/program
+	// note: os.ModeDir carries no permission bits, so 0755 is used explicitly here
+	destinationDir := filepath.Join(e.config.TargetDirectory, programName)
+	if err := os.MkdirAll(destinationDir, 0755); err != nil {
+		return "", errors.New(err, "creating directory for downloaded artifact failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, destinationDir))
+	}
+
+	// download from source to dest
+	path, err := e.download(e.config.OS(), programName, version)
+	if err != nil {
+		os.Remove(path)
+	}
+
+	return path, err
+}
+
+func (e *Downloader) download(operatingSystem, programName, version string) (string, error) {
+	filename, err := artifact.GetArtifactName(programName, version, operatingSystem, e.config.Arch())
+	if err != nil {
+		return "", errors.New(err, "generating package name failed")
+	}
+
+	fullPath, err := artifact.GetArtifactPath(programName, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
+	if err != nil {
+		return "", errors.New(err, "generating package path failed")
+	}
+
+	sourcePath := filepath.Join(e.dropPath, filename)
+	sourceFile, err := os.Open(sourcePath)
+	if err != nil {
+		return "", errors.New(err, fmt.Sprintf("package '%s' not found", sourcePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, sourcePath))
+	}
+	defer sourceFile.Close()
+
+	destinationFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, packagePermissions)
+	if err != nil {
+		return "", errors.New(err, "creating package file failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
+	}
+	defer destinationFile.Close()
+
+	_, err = io.Copy(destinationFile, sourceFile)
+	return fullPath, err
+}
+
+func getDropPath(cfg *artifact.Config) string {
+	// if the drop path is not provided fall back to the beats subfolder
+	if cfg == nil || cfg.DropPath == "" {
+		return beatsSubfolder
+	}
+
+	// if the drop path does not exist fall back to the beats subfolder
+	stat, err := os.Stat(cfg.DropPath)
+	if err != nil || !stat.IsDir() {
+		return beatsSubfolder
+	}
+
+	return cfg.DropPath
+}
diff --git a/x-pack/agent/pkg/artifact/download/fs/verifier.go b/x-pack/agent/pkg/artifact/download/fs/verifier.go
new file mode 100644
index 00000000000..2a16790f445
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/fs/verifier.go
@@ -0,0 +1,105 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/crypto/openpgp"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+)
+
+const (
+	ascSuffix = ".asc"
+)
+
+// Verifier verifies a downloaded package by comparing it with its public ASC
+// signature file.
+type Verifier struct {
+	config   *artifact.Config
+	pgpBytes []byte
+}
+
+// NewVerifier creates a verifier checking a downloaded package at the preconfigured
+// location against a locally provided PGP key (PgpFile is required here).
+func NewVerifier(config *artifact.Config) (*Verifier, error) {
+	v := &Verifier{
+		config: config,
+	}
+
+	if err := v.loadPGP(config.PgpFile); err != nil {
+		return nil, errors.New(err, "loading PGP")
+	}
+
+	return v, nil
+}
+
+// Verify checks the downloaded package at the preconfigured
+// location against the configured PGP key.
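+//
+// A usage sketch (assuming the package was downloaded first and PgpFile
+// points at a local armored public key; paths are hypothetical):
+//
+//	v, err := NewVerifier(&artifact.Config{
+//		TargetDirectory: "/opt/agent/downloads",
+//		PgpFile:         "/opt/agent/elastic.asc",
+//	})
+//	if err == nil {
+//		ok, err := v.Verify("filebeat", "7.5.1")
+//		_, _ = ok, err
+//	}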
+func (v *Verifier) Verify(programName, version string) (bool, error) {
+	filename, err := artifact.GetArtifactName(programName, version, v.config.OS(), v.config.Arch())
+	if err != nil {
+		return false, errors.New(err, "retrieving package name")
+	}
+
+	fullPath := filepath.Join(v.config.TargetDirectory, filename)
+
+	ascBytes, err := v.getPublicAsc(filename)
+	if err != nil {
+		return false, err
+	}
+
+	pubkeyReader := bytes.NewReader(v.pgpBytes)
+	ascReader := bytes.NewReader(ascBytes)
+	fileReader, err := os.OpenFile(fullPath, os.O_RDONLY, 0666)
+	if err != nil {
+		return false, errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
+	}
+	defer fileReader.Close()
+
+	keyring, err := openpgp.ReadArmoredKeyRing(pubkeyReader)
+	if err != nil {
+		return false, errors.New(err, "read armored key ring", errors.TypeSecurity)
+	}
+	_, err = openpgp.CheckArmoredDetachedSignature(keyring, fileReader, ascReader)
+	if err != nil {
+		return false, errors.New(err, "check detached signature", errors.TypeSecurity)
+	}
+
+	return true, nil
+}
+
+func (v *Verifier) getPublicAsc(filename string) ([]byte, error) {
+	ascFile := fmt.Sprintf("%s%s", filename, ascSuffix)
+	fullPath := filepath.Join(beatsSubfolder, ascFile)
+
+	b, err := ioutil.ReadFile(fullPath)
+	if err != nil {
+		return nil, errors.New(err, fmt.Sprintf("fetching asc file from '%s'", fullPath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
+	}
+
+	return b, nil
+}
+
+func (v *Verifier) loadPGP(file string) error {
+	var err error
+
+	if file == "" {
+		return errors.New("pgp file not specified for verifier", errors.TypeConfig)
+	}
+
+	v.pgpBytes, err = ioutil.ReadFile(file)
+	if err != nil {
+		return errors.New(err, errors.TypeFilesystem)
+	}
+
+	return nil
+}
diff --git a/x-pack/agent/pkg/artifact/download/http/downloader.go b/x-pack/agent/pkg/artifact/download/http/downloader.go
new file mode 100644
index 00000000000..ad569fbdf7a
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/http/downloader.go
@@ -0,0 +1,120 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/release"
+)
+
+const (
+	packagePermissions = 0660
+)
+
+var headers = map[string]string{
+	"User-Agent": fmt.Sprintf("Beat agent v%s", release.Version()),
+}
+
+// Downloader is a downloader able to fetch artifacts from the elastic.co website.
+type Downloader struct {
+	config *artifact.Config
+	client http.Client
+}
+
+// NewDownloader creates and configures an HTTP Downloader for elastic.co.
+func NewDownloader(config *artifact.Config) *Downloader {
+	client := http.Client{Timeout: config.Timeout}
+	rt := withHeaders(client.Transport, headers)
+	client.Transport = rt
+	return NewDownloaderWithClient(config, client)
+}
+
+// NewDownloaderWithClient creates an Elastic Downloader using the specified client.
+func NewDownloaderWithClient(config *artifact.Config, client http.Client) *Downloader {
+	return &Downloader{
+		config: config,
+		client: client,
+	}
+}
+
+// Download fetches the package from the configured source.
+// Returns the absolute path to the downloaded package and an error.
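+//
+// A minimal usage sketch (mirroring what the tests below do; values are
+// hypothetical and time must be imported):
+//
+//	d := NewDownloader(&artifact.Config{
+//		BeatsSourceURI:  "https://artifacts.elastic.co/downloads/beats/",
+//		TargetDirectory: "/opt/agent/downloads",
+//		Timeout:         30 * time.Second,
+//	})
+//	path, err := d.Download(context.Background(), "filebeat", "7.5.1")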
+func (e *Downloader) Download(ctx context.Context, programName, version string) (string, error) {
+	// download from source to dest
+	path, err := e.download(ctx, e.config.OS(), programName, version)
+	if err != nil {
+		os.Remove(path)
+	}
+
+	return path, err
+}
+
+func (e *Downloader) composeURI(programName, packageName string) (string, error) {
+	upstream := e.config.BeatsSourceURI
+	if !strings.HasPrefix(upstream, "http") && !strings.HasPrefix(upstream, "file") && !strings.HasPrefix(upstream, "/") {
+		// always default to https
+		upstream = fmt.Sprintf("https://%s", upstream)
+	}
+
+	// example: https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.1.1-x86_64.rpm
+	uri, err := url.Parse(upstream)
+	if err != nil {
+		return "", errors.New(err, "invalid upstream URI", errors.TypeConfig)
+	}
+
+	uri.Path = path.Join(uri.Path, programName, packageName)
+	return uri.String(), nil
+}
+
+func (e *Downloader) download(ctx context.Context, operatingSystem, programName, version string) (string, error) {
+	filename, err := artifact.GetArtifactName(programName, version, operatingSystem, e.config.Arch())
+	if err != nil {
+		return "", errors.New(err, "generating package name failed")
+	}
+
+	fullPath, err := artifact.GetArtifactPath(programName, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
+	if err != nil {
+		return "", errors.New(err, "generating package path failed")
+	}
+
+	sourceURI, err := e.composeURI(programName, filename)
+	if err != nil {
+		return "", err
+	}
+
+	req, err := http.NewRequest("GET", sourceURI, nil)
+	if err != nil {
+		return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI))
+	}
+
+	resp, err := e.client.Do(req.WithContext(ctx))
+	if err != nil {
+		return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI))
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		return "", errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI))
+	}
+
+	destinationFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, packagePermissions)
+	if err != nil {
+		return "", errors.New(err, "creating package file failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
+	}
+	defer destinationFile.Close()
+
+	_, err = io.Copy(destinationFile, resp.Body)
+	return fullPath, err
+}
diff --git a/x-pack/agent/pkg/artifact/download/http/elastic_test.go b/x-pack/agent/pkg/artifact/download/http/elastic_test.go
new file mode 100644
index 00000000000..f13226b1923
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/http/elastic_test.go
@@ -0,0 +1,182 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package http + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact" +) + +const ( + beatName = "filebeat" + version = "7.5.1" + sourcePattern = "/downloads/beats/filebeat/" + source = "http://artifacts.elastic.co/downloads/beats/" +) + +type testCase struct { + system string + arch string +} + +func TestDownload(t *testing.T) { + targetDir, err := ioutil.TempDir(os.TempDir(), "") + if err != nil { + t.Fatal(err) + } + + timeout := 30 * time.Second + testCases := getTestCases() + elasticClient := getElasticCoClient() + + config := &artifact.Config{ + BeatsSourceURI: source, + TargetDirectory: targetDir, + Timeout: timeout, + } + + for _, testCase := range testCases { + testName := fmt.Sprintf("%s-binary-%s", testCase.system, testCase.arch) + t.Run(testName, func(t *testing.T) { + config.OperatingSystem = testCase.system + config.Architecture = testCase.arch + + testClient := NewDownloaderWithClient(config, elasticClient) + artifactPath, err := testClient.Download(context.Background(), beatName, version) + if err != nil { + t.Fatal(err) + } + + _, err = os.Stat(artifactPath) + if err != nil { + t.Fatal(err) + } + + os.Remove(artifactPath) + }) + } +} + +func TestVerify(t *testing.T) { + // skip so beats are not fetched from upstream, test only locally when change is made + t.Skip() + + targetDir, err := ioutil.TempDir(os.TempDir(), "") + if err != nil { + t.Fatal(err) + } + + timeout := 30 * time.Second + testCases := getRandomTestCases() + + config := &artifact.Config{ + BeatsSourceURI: source, + TargetDirectory: targetDir, + Timeout: timeout, + } + + for _, testCase := range testCases { + testName := fmt.Sprintf("%s-binary-%s", testCase.system, testCase.arch) + t.Run(testName, func(t *testing.T) { + config.OperatingSystem = testCase.system + config.Architecture = testCase.arch + + testClient := NewDownloader(config) + + artifact, err := testClient.Download(context.Background(), beatName, version) + if err != nil { + t.Fatal(err) + } + + _, err = os.Stat(artifact) + if err != nil { + t.Fatal(err) + } + + testVerifier, err := NewVerifier(config) + if err != nil { + t.Fatal(err) + } + + isOk, err := testVerifier.Verify(beatName, version) + if err != nil { + t.Fatal(err) + } + + if !isOk { + t.Fatal("verify failed") + } + + os.Remove(artifact) + }) + } +} + +func getTestCases() []testCase { + // always test random package to save time + return []testCase{ + {"linux", "32"}, + {"linux", "64"}, + {"darwin", "32"}, + {"darwin", "64"}, + {"windows", "32"}, + {"windows", "64"}, + } +} + +func getRandomTestCases() []testCase { + tt := getTestCases() + + rand.Seed(time.Now().UnixNano()) + first := rand.Intn(len(tt)) + second := rand.Intn(len(tt)) + + return []testCase{ + tt[first], + tt[second], + } +} + +func getElasticCoClient() http.Client { + correctValues := map[string]struct{}{ + fmt.Sprintf("%s-%s-%s", beatName, version, "i386.deb"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "amd64.deb"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "i686.rpm"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "x86_64.rpm"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "linux-x86.tar.gz"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "linux-x86_64.tar.gz"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "windows-x86.zip"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatName, version, "windows-x86_64.zip"): 
struct{}{},
+		fmt.Sprintf("%s-%s-%s", beatName, version, "darwin-x86_64.tar.gz"): struct{}{},
+	}
+
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		packageName := r.URL.Path[len(sourcePattern):]
+		if _, ok := correctValues[packageName]; !ok {
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+
+		w.Write([]byte(packageName))
+	})
+	server := httptest.NewServer(handler)
+
+	return http.Client{
+		Transport: &http.Transport{
+			DialContext: func(_ context.Context, network, _ string) (net.Conn, error) {
+				return net.Dial(network, server.Listener.Addr().String())
+			},
+		},
+	}
+}
diff --git a/x-pack/agent/pkg/artifact/download/http/headers_rtt.go b/x-pack/agent/pkg/artifact/download/http/headers_rtt.go
new file mode 100644
index 00000000000..23bc1410b5c
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/http/headers_rtt.go
@@ -0,0 +1,26 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package http
+
+import "net/http"
+
+func withHeaders(rtt http.RoundTripper, headers map[string]string) http.RoundTripper {
+	if rtt == nil {
+		rtt = http.DefaultTransport
+	}
+	return &rttWithHeaders{target: rtt, headers: headers}
+}
+
+type rttWithHeaders struct {
+	target  http.RoundTripper
+	headers map[string]string
+}
+
+func (r *rttWithHeaders) RoundTrip(req *http.Request) (*http.Response, error) {
+	for k, v := range r.headers {
+		req.Header.Add(k, v)
+	}
+	return r.target.RoundTrip(req)
+}
diff --git a/x-pack/agent/pkg/artifact/download/http/headers_rtt_test.go b/x-pack/agent/pkg/artifact/download/http/headers_rtt_test.go
new file mode 100644
index 00000000000..31041296da6
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/http/headers_rtt_test.go
@@ -0,0 +1,38 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package http
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/release"
+)
+
+func TestAddingHeaders(t *testing.T) {
+	msg := []byte("OK")
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		assert.Equal(t, fmt.Sprintf("Beat agent v%s", release.Version()), req.Header.Get("User-Agent"))
+		w.Write(msg)
+	}))
+	defer server.Close()
+
+	c := server.Client()
+	rtt := withHeaders(c.Transport, headers)
+
+	c.Transport = rtt
+	resp, err := c.Get(server.URL)
+	require.NoError(t, err)
+	b, err := ioutil.ReadAll(resp.Body)
+	defer resp.Body.Close()
+	require.NoError(t, err)
+	assert.Equal(t, msg, b)
+}
diff --git a/x-pack/agent/pkg/artifact/download/http/verifier.go b/x-pack/agent/pkg/artifact/download/http/verifier.go
new file mode 100644
index 00000000000..dc5395d4e97
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/http/verifier.go
@@ -0,0 +1,156 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package http
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+
+	"golang.org/x/crypto/openpgp"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+)
+
+const (
+	publicKeyURI = "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+	ascSuffix    = ".asc"
+)
+
+// Verifier verifies a downloaded package by comparing it with the public ASC
+// file from the elastic.co website.
+type Verifier struct {
+	config   *artifact.Config
+	client   http.Client
+	pgpBytes []byte
+}
+
+// NewVerifier creates a verifier checking a downloaded package at the preconfigured
+// location against a key stored on the elastic.co website.
+func NewVerifier(config *artifact.Config) (*Verifier, error) {
+	client := http.Client{Timeout: config.Timeout}
+	rtt := withHeaders(client.Transport, headers)
+	client.Transport = rtt
+	v := &Verifier{
+		config: config,
+		client: client,
+	}
+
+	if err := v.loadPGP(config.PgpFile); err != nil {
+		return nil, errors.New(err, "loading PGP")
+	}
+
+	return v, nil
+}
+
+// Verify checks a downloaded package at the preconfigured
+// location against a key stored on the elastic.co website.
+func (v *Verifier) Verify(programName, version string) (bool, error) {
+	filename, err := artifact.GetArtifactName(programName, version, v.config.OS(), v.config.Arch())
+	if err != nil {
+		return false, errors.New(err, "retrieving package name")
+	}
+
+	fullPath, err := artifact.GetArtifactPath(programName, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory)
+	if err != nil {
+		return false, errors.New(err, "retrieving package path")
+	}
+
+	ascURI, err := v.composeURI(programName, filename)
+	if err != nil {
+		return false, errors.New(err, "composing URI for fetching asc file", errors.TypeNetwork)
+	}
+
+	ascBytes, err := v.getPublicAsc(ascURI)
+	if err != nil {
+		return false, errors.New(err, fmt.Sprintf("fetching asc file from %s", ascURI), errors.TypeNetwork, errors.M(errors.MetaKeyURI, ascURI))
+	}
+
+	pubkeyReader := bytes.NewReader(v.pgpBytes)
+	ascReader := bytes.NewReader(ascBytes)
+	fileReader, err := os.OpenFile(fullPath, os.O_RDONLY, 0666)
+	if err != nil {
+		return false, errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
+	}
+	defer fileReader.Close()
+
+	keyring, err := openpgp.ReadArmoredKeyRing(pubkeyReader)
+	if err != nil {
+		return false, errors.New(err, "read armored key ring", errors.TypeSecurity)
+	}
+	_, err = openpgp.CheckArmoredDetachedSignature(keyring, fileReader, ascReader)
+	if err != nil {
+		return false, errors.New(err, "check detached signature", errors.TypeSecurity)
+	}
+
+	return true, nil
+}
+
+func (v *Verifier) composeURI(programName, filename string) (string, error) {
+	upstream := v.config.BeatsSourceURI
+	if !strings.HasPrefix(upstream, "http") && !strings.HasPrefix(upstream, "file") && !strings.HasPrefix(upstream, "/") {
+		// always default to https
+		upstream = fmt.Sprintf("https://%s", upstream)
+	}
+
+	// example: https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.1.1-x86_64.rpm
+	uri, err := url.Parse(upstream)
+	if err != nil {
+		return "", errors.New(err, "invalid upstream URI", errors.TypeNetwork, errors.M(errors.MetaKeyURI, upstream))
+	}
+
+	uri.Path = path.Join(uri.Path, programName, filename+ascSuffix)
+	return uri.String(), nil
+}
+
+func (v *Verifier) getPublicAsc(sourceURI string) ([]byte, error) {
+	resp, err := v.client.Get(sourceURI)
+	if err != nil {
+		return nil, errors.New(err, "failed loading public key", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI))
key", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + } + + return ioutil.ReadAll(resp.Body) +} + +func (v *Verifier) loadPGP(file string) error { + var err error + + if file == "" { + v.pgpBytes, err = v.loadPGPFromWeb() + return err + } + + v.pgpBytes, err = ioutil.ReadFile(file) + if err != nil { + return errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, file)) + } + + return nil +} + +func (v *Verifier) loadPGPFromWeb() ([]byte, error) { + resp, err := v.client.Get(publicKeyURI) + if err != nil { + return nil, errors.New(err, "failed loading public key", errors.TypeNetwork, errors.M(errors.MetaKeyURI, publicKeyURI)) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", publicKeyURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, publicKeyURI)) + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/x-pack/agent/pkg/artifact/download/localremote/downloader.go b/x-pack/agent/pkg/artifact/download/localremote/downloader.go new file mode 100644 index 00000000000..7faf6a500dd --- /dev/null +++ b/x-pack/agent/pkg/artifact/download/localremote/downloader.go @@ -0,0 +1,19 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package localremote + +import ( + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/composed" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/fs" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/http" +) + +// NewDownloader creates a downloader which first checks local directory +// and then fallbacks to remote if configured. +func NewDownloader(config *artifact.Config, downloaders ...download.Downloader) download.Downloader { + return composed.NewDownloader(fs.NewDownloader(config), http.NewDownloader(config)) +} diff --git a/x-pack/agent/pkg/artifact/download/localremote/verifier.go b/x-pack/agent/pkg/artifact/download/localremote/verifier.go new file mode 100644 index 00000000000..c4570aee2ee --- /dev/null +++ b/x-pack/agent/pkg/artifact/download/localremote/verifier.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package localremote + +import ( + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/composed" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/fs" + "github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/download/http" +) + +// NewVerifier creates a downloader which first checks local directory +// and then fallbacks to remote if configured. 
+func NewVerifier(config *artifact.Config, downloaders ...download.Downloader) (download.Verifier, error) {
+	fsVer, err := fs.NewVerifier(config)
+	if err != nil {
+		return nil, err
+	}
+	remoteVer, err := http.NewVerifier(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return composed.NewVerifier(fsVer, remoteVer), nil
+}
diff --git a/x-pack/agent/pkg/artifact/download/verifier.go b/x-pack/agent/pkg/artifact/download/verifier.go
new file mode 100644
index 00000000000..6aa4dc4abe4
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/download/verifier.go
@@ -0,0 +1,10 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package download
+
+// Verifier is an interface for verifying the GPG signature of a downloaded artifact.
+type Verifier interface {
+	Verify(programName, version string) (bool, error)
+}
diff --git a/x-pack/agent/pkg/artifact/install/installer.go b/x-pack/agent/pkg/artifact/install/installer.go
new file mode 100644
index 00000000000..7ccb6321918
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/install/installer.go
@@ -0,0 +1,43 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package install
+
+import (
+	"errors"
+	"runtime"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/install/tar"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact/install/zip"
+)
+
+var (
+	// ErrConfigNotProvided is returned when the provided config is nil
+	ErrConfigNotProvided = errors.New("config not provided")
+)
+
+// Installer is an interface allowing installation of an artifact
+type Installer interface {
+	// Install installs an artifact into the given directory and returns
+	// an error if something went wrong
+	Install(programName, version, installDir string) error
+}
+
+// NewInstaller returns the installer matching the binary package type of the
+// current platform: a zip installer on windows, a tar installer on linux and mac.
+func NewInstaller(config *artifact.Config) (Installer, error) {
+	if config == nil {
+		return nil, ErrConfigNotProvided
+	}
+
+	if runtime.GOOS == "windows" {
+		return zip.NewInstaller(config)
+	}
+	return tar.NewInstaller(config)
+}
diff --git a/x-pack/agent/pkg/artifact/install/tar/tar_installer.go b/x-pack/agent/pkg/artifact/install/tar/tar_installer.go
new file mode 100644
index 00000000000..998589bad8a
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/install/tar/tar_installer.go
@@ -0,0 +1,111 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package tar
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+)
+
+// Installer for tar packages
+type Installer struct {
+	config *artifact.Config
+}
+
+// NewInstaller creates an installer able to install tar packages
+func NewInstaller(config *artifact.Config) (*Installer, error) {
+	return &Installer{
+		config: config,
+	}, nil
+}
+
+// Install performs installation of a program in a specific version.
+// It expects the package to be already downloaded.
+func (i *Installer) Install(programName, version, _ string) error {
+	artifactPath, err := artifact.GetArtifactPath(programName, version, i.config.OS(), i.config.Arch(), i.config.TargetDirectory)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Open(artifactPath)
+	if err != nil {
+		return errors.New(fmt.Sprintf("artifact for '%s' version '%s' could not be found at '%s'", programName, version, artifactPath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, artifactPath))
+	}
+	defer f.Close()
+
+	return unpack(f, i.config.InstallPath)
+}
+
+func unpack(r io.Reader, dir string) error {
+	zr, err := gzip.NewReader(r)
+	if err != nil {
+		return errors.New("requires gzip-compressed body", err, errors.TypeFilesystem)
+	}
+
+	tr := tar.NewReader(zr)
+
+	for {
+		f, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		if !validFileName(f.Name) {
+			return errors.New(fmt.Sprintf("tar contained invalid filename: %q", f.Name), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name))
+		}
+		rel := filepath.FromSlash(f.Name)
+		abs := filepath.Join(dir, rel)
+
+		fi := f.FileInfo()
+		mode := fi.Mode()
+		switch {
+		case mode.IsRegular():
+			// just to be sure, it should already be created by the Dir type
+			if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil {
+				return errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs))
+			}
+
+			wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm())
+			if err != nil {
+				return errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs))
+			}
+
+			_, err = io.Copy(wf, tr)
+			if closeErr := wf.Close(); closeErr != nil && err == nil {
+				err = closeErr
+			}
+			if err != nil {
+				return fmt.Errorf("TarInstaller: error writing to %s: %v", abs, err)
+			}
+		case mode.IsDir():
+			if err := os.MkdirAll(abs, 0755); err != nil {
+				return errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs))
+			}
+		default:
+			return errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", f.Name, mode), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name))
+		}
+	}
+
+	return nil
+}
+
+func validFileName(p string) bool {
+	if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") {
+		return false
+	}
+	return true
+}
diff --git a/x-pack/agent/pkg/artifact/install/zip/zip_installer.go b/x-pack/agent/pkg/artifact/install/zip/zip_installer.go
new file mode 100644
index 00000000000..6e8d36bb9b0
--- /dev/null
+++ b/x-pack/agent/pkg/artifact/install/zip/zip_installer.go
@@ -0,0 +1,71 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package zip
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+)
+
+const (
+	// powershellCmdTemplate uses an elevated execution policy to avoid failure in case script execution is disabled on the system
+	powershellCmdTemplate = `set-executionpolicy unrestricted; cd %s; .\install-service-%s.ps1`
+)
+
+// Installer for zip packages
+type Installer struct {
+	config *artifact.Config
+}
+
+// NewInstaller creates an installer able to install zip packages
+func NewInstaller(config *artifact.Config) (*Installer, error) {
+	return &Installer{
+		config: config,
+	}, nil
+}
+
+// Install performs installation of a program in a specific version.
+// It expects the package to be already downloaded.
+func (i *Installer) Install(programName, version, installDir string) error {
+	if err := i.unzip(programName, version, installDir); err != nil {
+		return err
+	}
+
+	oldPath := filepath.Join(installDir, fmt.Sprintf("%s-%s-windows", programName, version))
+	newPath := filepath.Join(installDir, strings.Title(programName))
+	if err := os.Rename(oldPath, newPath); err != nil {
+		return errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, newPath))
+	}
+
+	return i.runInstall(programName, installDir)
+}
+
+func (i *Installer) unzip(programName, version, installPath string) error {
+	artifactPath, err := artifact.GetArtifactPath(programName, version, i.config.OS(), i.config.Arch(), i.config.TargetDirectory)
+	if err != nil {
+		return err
+	}
+
+	if _, err := os.Stat(artifactPath); err != nil {
+		return errors.New(fmt.Sprintf("artifact for '%s' version '%s' could not be found at '%s'", programName, version, artifactPath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, artifactPath))
+	}
+
+	powershellArg := fmt.Sprintf("Expand-Archive -Path \"%s\" -DestinationPath \"%s\"", artifactPath, installPath)
+	installCmd := exec.Command("powershell", "-command", powershellArg)
+	return installCmd.Run()
+}
+
+func (i *Installer) runInstall(programName, installPath string) error {
+	powershellCmd := fmt.Sprintf(powershellCmdTemplate, installPath, programName)
+
+	installCmd := exec.Command("powershell", "-command", powershellCmd)
+	return installCmd.Run()
+}
diff --git a/x-pack/agent/pkg/basecmd/cmd.go b/x-pack/agent/pkg/basecmd/cmd.go
new file mode 100644
index 00000000000..cfe3631e5bf
--- /dev/null
+++ b/x-pack/agent/pkg/basecmd/cmd.go
@@ -0,0 +1,19 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package basecmd
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/basecmd/version"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/cli"
+)
+
+// NewDefaultCommandsWithArgs returns a list of default commands to execute.
+func NewDefaultCommandsWithArgs(args []string, streams *cli.IOStreams) []*cobra.Command {
+	return []*cobra.Command{
+		version.NewCommandWithArgs(streams),
+	}
+}
diff --git a/x-pack/agent/pkg/basecmd/cmd_test.go b/x-pack/agent/pkg/basecmd/cmd_test.go
new file mode 100644
index 00000000000..accc4bc976e
--- /dev/null
+++ b/x-pack/agent/pkg/basecmd/cmd_test.go
@@ -0,0 +1,16 @@
+// Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package basecmd + +import ( + "testing" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/cli" +) + +func TestBaseCmd(t *testing.T) { + streams, _, _, _ := cli.NewTestingIOStreams() + NewDefaultCommandsWithArgs([]string{}, streams) +} diff --git a/x-pack/agent/pkg/basecmd/version/cmd.go b/x-pack/agent/pkg/basecmd/version/cmd.go new file mode 100644 index 00000000000..da05cfdd497 --- /dev/null +++ b/x-pack/agent/pkg/basecmd/version/cmd.go @@ -0,0 +1,31 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package version + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/cli" + "github.com/elastic/beats/v7/x-pack/agent/pkg/release" +) + +// NewCommandWithArgs returns a new version command. +func NewCommandWithArgs(streams *cli.IOStreams) *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "Display the version of the agent.", + Run: func(_ *cobra.Command, _ []string) { + fmt.Fprintf( + streams.Out, + "Agent version is %s (build: %s at %s)\n", + release.Version(), + release.Commit(), + release.BuildTime(), + ) + }, + } +} diff --git a/x-pack/agent/pkg/basecmd/version/cmd_test.go b/x-pack/agent/pkg/basecmd/version/cmd_test.go new file mode 100644 index 00000000000..0395e604b42 --- /dev/null +++ b/x-pack/agent/pkg/basecmd/version/cmd_test.go @@ -0,0 +1,26 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package version + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/cli" +) + +func TestCmd(t *testing.T) { + streams, _, out, _ := cli.NewTestingIOStreams() + NewCommandWithArgs(streams).Execute() + version, err := ioutil.ReadAll(out) + + if !assert.NoError(t, err) { + return + } + assert.True(t, strings.Contains(string(version), "Agent version is")) +} diff --git a/x-pack/agent/pkg/boolexp/Boolexp.g4 b/x-pack/agent/pkg/boolexp/Boolexp.g4 new file mode 100644 index 00000000000..fd587c1c8b0 --- /dev/null +++ b/x-pack/agent/pkg/boolexp/Boolexp.g4 @@ -0,0 +1,55 @@ +// boolexp.g4 +grammar Boolexp; + +// Tokens +EQ: '=='; +NEQ: '!='; +GT: '>'; +LT: '<'; +GTE: '>='; +LTE: '<='; +AND: 'and' | 'AND' | '&&'; +OR: 'or' | 'OR' | '||'; +TRUE: 'true' | 'TRUE'; +FALSE: 'false' | 'FALSE'; +FLOAT: [0-9]+ '.' [0-9]+; +NUMBER: [0-9]+; +WHITESPACE: [ \r\n\t]+ -> skip; +NOT: 'NOT' | '!' 
| 'not';
+VARIABLE: BEGIN_VARIABLE [a-zA-Z0-9_.]+('.'[a-zA-Z0-9_]+)* END_VARIABLE;
+METHODNAME: [a-zA-Z_] [a-zA-Z0-9_]*;
+TEXT : '\'' ~[\r\n']* '\'';
+LPAR: '(';
+RPAR: ')';
+fragment BEGIN_VARIABLE: '%{[';
+fragment END_VARIABLE: ']}';
+
+expList: exp EOF;
+
+exp
+: LPAR exp RPAR # ExpInParen
+| NOT exp # ExpNot
+| left=exp EQ right=exp # ExpArithmeticEQ
+| left=exp NEQ right=exp # ExpArithmeticNEQ
+| left=exp LTE right=exp # ExpArithmeticLTE
+| left=exp GTE right=exp # ExpArithmeticGTE
+| left=exp LT right=exp # ExpArithmeticLT
+| left=exp GT right=exp # ExpArithmeticGT
+| left=exp AND right=exp # ExpLogicalAnd
+| left=exp OR right=exp # ExpLogicalOR
+| boolean # ExpBoolean
+| VARIABLE # ExpVariable
+| METHODNAME LPAR arguments? RPAR # ExpFunction
+| TEXT # ExpText
+| FLOAT # ExpFloat
+| NUMBER # ExpNumber
+;
+
+boolean
+: TRUE | FALSE
+;
+
+arguments
+: exp( ',' exp)*
+;
+
diff --git a/x-pack/agent/pkg/boolexp/boolexp.go b/x-pack/agent/pkg/boolexp/boolexp.go
new file mode 100644
index 00000000000..87d43ab2eb6
--- /dev/null
+++ b/x-pack/agent/pkg/boolexp/boolexp.go
@@ -0,0 +1,17 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package boolexp
+
+//go:generate antlr4 -Dlanguage=Go -o parser Boolexp.g4 -visitor
+
+// Eval parses and evaluates an expression. Every time this method is called a new
+// parser is created; if you want to reuse the parsed tree, see the `New` method.
+func Eval(expression string, methods *MethodsReg, store VarStore) (bool, error) {
+	e, err := New(expression, methods)
+	if err != nil {
+		return false, err
+	}
+	return e.Eval(store)
+}
diff --git a/x-pack/agent/pkg/boolexp/boolexp_test.go b/x-pack/agent/pkg/boolexp/boolexp_test.go
new file mode 100644
index 00000000000..9a724833747
--- /dev/null
+++ b/x-pack/agent/pkg/boolexp/boolexp_test.go
@@ -0,0 +1,272 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package boolexp + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/antlr/antlr4/runtime/Go/antlr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/boolexp/parser" +) + +var showDebug = lookupEnvOrDefault("DEBUG", "0") + +type testVarStore struct { + vars map[string]interface{} +} + +func (s *testVarStore) Lookup(v string) (interface{}, bool) { + val, ok := s.vars[v] + return val, ok +} + +func TestBoolexp(t *testing.T) { + testcases := []struct { + expression string + result bool + err bool + }{ + // Variables + {expression: "%{[hello.var]} == 'hello'", result: true}, + {expression: "contains(%{[hello.var]}, 'hell')", result: true}, + + {expression: "true", result: true}, + {expression: "false", result: false}, + {expression: "!false", result: true}, + {expression: "!true", result: false}, + {expression: "!(1 == 1)", result: false}, + {expression: "NOT false", result: true}, + {expression: "NOT true", result: false}, + {expression: "not false", result: true}, + {expression: "not true", result: false}, + {expression: "NOT (1 == 1)", result: false}, + + {expression: "1 == 1", result: true}, + {expression: "1 == 2", result: false}, + {expression: "1 != 2", result: true}, + {expression: "1 != 1", result: false}, + {expression: "'hello' == 'hello'", result: true}, + {expression: "'hello' == 'hola'", result: false}, + + // and + {expression: "(1 == 1) AND (2 == 2)", result: true}, + {expression: "(1 == 4) AND (2 == 2)", result: false}, + {expression: "(1 == 1) AND (2 == 3)", result: false}, + {expression: "(1 == 5) AND (2 == 3)", result: false}, + + {expression: "1 == 1 AND 2 == 2", result: true}, + {expression: "1 == 4 AND 2 == 2", result: false}, + {expression: "1 == 1 AND 2 == 3", result: false}, + {expression: "1 == 5 AND 2 == 3", result: false}, + + {expression: "(1 == 1) and (2 == 2)", result: true}, + {expression: "(1 == 4) and (2 == 2)", result: false}, + {expression: "(1 == 1) and (2 == 3)", result: false}, + {expression: "(1 == 5) and (2 == 3)", result: false}, + + {expression: "(1 == 1) && (2 == 2)", result: true}, + {expression: "(1 == 4) && (2 == 2)", result: false}, + {expression: "(1 == 1) && (2 == 3)", result: false}, + {expression: "(1 == 5) && (2 == 3)", result: false}, + + // or + {expression: "(1 == 1) OR (2 == 2)", result: true}, + {expression: "(1 == 1) OR (3 == 2)", result: true}, + {expression: "(1 == 2) OR (2 == 2)", result: true}, + {expression: "(1 == 2) OR (2 == 2)", result: true}, + {expression: "(1 == 2) OR (1 == 2)", result: false}, + + {expression: "(1 == 1) or (2 == 2)", result: true}, + {expression: "(1 == 1) or (3 == 2)", result: true}, + {expression: "(1 == 2) or (2 == 2)", result: true}, + {expression: "(1 == 2) or (2 == 2)", result: true}, + {expression: "(1 == 2) or (1 == 2)", result: false}, + + {expression: "(1 == 1) || (2 == 2)", result: true}, + {expression: "(1 == 1) || (3 == 2)", result: true}, + {expression: "(1 == 2) || (2 == 2)", result: true}, + {expression: "(1 == 2) || (2 == 2)", result: true}, + {expression: "(1 == 2) || (1 == 2)", result: false}, + + // mixed + {expression: "((1 == 1) AND (2 == 2)) OR (2 != 3)", result: true}, + {expression: "(1 == 1 OR 2 == 2) AND 2 != 3", result: true}, + {expression: "((1 == 1) AND (2 == 2)) OR (2 != 3)", result: true}, + {expression: "1 == 1 OR 2 == 2 AND 2 != 3", result: true}, + + // functions + {expression: "len('hello') == 5", result: true}, + {expression: "len('hello') != 1", result: true}, + 
{expression: "len('hello') == 1", result: false}, + {expression: "(len('hello') == 5) AND (len('Hi') == 2)", result: true}, + {expression: "len('hello') == size('hello')", result: true}, + {expression: "len('hello') == size('hi')", result: false}, + {expression: "contains('hello', 'eial')", result: false}, + {expression: "contains('hello', 'hel')", result: true}, + {expression: "!contains('hello', 'hel')", result: false}, + {expression: "contains('hello', 'hel') == true", result: true}, + {expression: "contains('hello', 'hel') == false", result: false}, + {expression: "countArgs('A', 'B', 'C', 'D', 'E', 'F') == 6", result: true}, + {expression: "countArgs('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J') == 10", result: true}, + + // integers + {expression: "1 < 5", result: true}, + {expression: "10 < 5", result: false}, + {expression: "1 > 5", result: false}, + {expression: "10 > 5", result: true}, + {expression: "1 <= 5", result: true}, + {expression: "5 <= 5", result: true}, + {expression: "10 <= 5", result: false}, + {expression: "10 >= 5", result: true}, + {expression: "5 >= 5", result: true}, + {expression: "4 >= 5", result: false}, + + // Floats + {expression: "1 == 1.0", result: true}, + {expression: "1.0 == 1.0", result: true}, + {expression: "1.0 == 1", result: true}, + {expression: "1 != 2.0", result: true}, + {expression: "1.0 != 2.0", result: true}, + {expression: "1.0 != 2", result: true}, + {expression: "1 < 5.0", result: true}, + {expression: "10 < 5.0", result: false}, + {expression: "1 > 5.0", result: false}, + {expression: "10 > 5.0", result: true}, + {expression: "1 <= 5.0", result: true}, + {expression: "10 <= 5.0", result: false}, + {expression: "1 >= 5.0", result: false}, + {expression: "10 >= 5.0", result: true}, + {expression: "10 >= 10.0", result: true}, + {expression: "10 <= 10.0", result: true}, + + // Bad expression and malformed expression + {expression: "contains('hello')", err: true}, + {expression: "contains()", err: true}, + {expression: "contains()", err: true}, + {expression: "donotexist()", err: true}, + } + + store := &testVarStore{ + vars: map[string]interface{}{ + "hello.var": "hello", + }, + } + + fn := func(args []interface{}) (interface{}, error) { + if len(args) != 1 { + return nil, fmt.Errorf("expecting 1 argument received %d", len(args)) + } + val, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("expecting a string received %T", args[0]) + } + return len(val), nil + } + + methods := NewMethodsReg() + methods.Register("len", fn) + // test function aliasing + methods.Register("size", fn) + // test multiples arguments function. 
+ methods.Register("contains", func(args []interface{}) (interface{}, error) { + if len(args) != 2 { + return nil, fmt.Errorf("expecting 2 arguments received %d", len(args)) + } + + haystack, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("args 1 must be a string and received %T", args[0]) + } + + needle, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("args 2 must be a string and received %T", args[0]) + } + + return strings.Contains(haystack, needle), nil + }, + ) + + methods.Register("countArgs", func(args []interface{}) (interface{}, error) { + return len(args), nil + }) + + for _, test := range testcases { + test := test + var title string + if test.err { + title = fmt.Sprintf("%s failed parsing", test.expression) + } else { + title = fmt.Sprintf("%s => return %v", test.expression, test.result) + } + t.Run(title, func(t *testing.T) { + if showDebug == "1" { + debug(test.expression) + } + + r, err := Eval(test.expression, methods, store) + + if test.err { + require.Error(t, err) + return + } + + assert.Equal(t, test.result, r) + }) + } +} + +func debug(expression string) { + raw := antlr.NewInputStream(expression) + + lexer := parser.NewBoolexpLexer(raw) + for { + t := lexer.NextToken() + if t.GetTokenType() == antlr.TokenEOF { + break + } + fmt.Printf("%s (%q)\n", + lexer.SymbolicNames[t.GetTokenType()], t.GetText()) + } +} + +var result bool + +func BenchmarkEval(b *testing.B) { + fn := func(args []interface{}) (interface{}, error) { + if len(args) != 1 { + return nil, fmt.Errorf("expecting 1 argument received %d", len(args)) + } + val, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("expecting a string received %T", args[0]) + } + return len(val), nil + } + + methods := NewMethodsReg() + methods.Register("len", fn) + + expression, _ := New("(len('hello') == 5) AND (len('Hi') == 2)", methods) + + var r bool + for n := 0; n < b.N; n++ { + r, _ = expression.Eval(nil) + } + result = r +} + +func lookupEnvOrDefault(name, d string) string { + if v, ok := os.LookupEnv(name); ok { + return v + } + return d +} diff --git a/x-pack/agent/pkg/boolexp/compare.go b/x-pack/agent/pkg/boolexp/compare.go new file mode 100644 index 00000000000..9a68286e3df --- /dev/null +++ b/x-pack/agent/pkg/boolexp/compare.go @@ -0,0 +1,283 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package boolexp + +import "fmt" + +type operand interface{} + +type compare func(left, right operand) (bool, error) + +func compareEQ(left, right operand) (bool, error) { + switch v := left.(type) { + case bool: + rV, ok := right.(bool) + if !ok { + return false, nil + } + if rV == v { + return true, nil + } + return false, nil + case int: + switch rv := right.(type) { + case int: + return v == rv, nil + case float64: + // TODO: check overflow, weird things will happen with precision here. 
+			// use modf
+			return float64(v) == rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: ==, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case float64:
+		switch rv := right.(type) {
+		case int:
+			return v == float64(rv), nil
+		case float64:
+			return v == rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: ==, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case string:
+		rV, ok := right.(string)
+		if !ok {
+			return false, nil
+		}
+		if rV == v {
+			return true, nil
+		}
+		return false, nil
+	default:
+		return false, fmt.Errorf(
+			"compare: ==, incompatible type to compare, left=%T, right=%T",
+			left,
+			right,
+		)
+	}
+}
+
+func compareNEQ(left, right operand) (bool, error) {
+	switch v := left.(type) {
+	case bool:
+		rV, ok := right.(bool)
+		if !ok {
+			return false, nil
+		}
+		if rV == v {
+			return false, nil
+		}
+		return true, nil
+	case int:
+		switch rv := right.(type) {
+		case int:
+			return v != rv, nil
+		case float64:
+			// TODO: check overflow, weird things will happen with precision here.
+			// use modf
+			return float64(v) != rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: !=, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case float64:
+		switch rv := right.(type) {
+		case int:
+			return v != float64(rv), nil
+		case float64:
+			return v != rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: !=, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case string:
+		rV, ok := right.(string)
+		if !ok {
+			return false, nil
+		}
+		if rV == v {
+			return false, nil
+		}
+		return true, nil
+	default:
+		return false, fmt.Errorf(
+			"compare: !=, incompatible type to compare, left=%T, right=%T",
+			left,
+			right,
+		)
+	}
+}
+
+func compareLT(left, right operand) (bool, error) {
+	switch v := left.(type) {
+	case int:
+		switch rv := right.(type) {
+		case int:
+			return v < rv, nil
+		case float64:
+			return float64(v) < rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: <, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case float64:
+		switch rv := right.(type) {
+		case int:
+			return v < float64(rv), nil
+		case float64:
+			return v < rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: <, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	default:
+		return false, fmt.Errorf(
+			"compare: <, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+			left,
+			right,
+		)
+	}
+}
+
+func compareLTE(left, right operand) (bool, error) {
+	switch v := left.(type) {
+	case int:
+		switch rv := right.(type) {
+		case int:
+			return v <= rv, nil
+		case float64:
+			return float64(v) <= rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: <=, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case float64:
+		switch rv := right.(type) {
+		case int:
+			return v <= float64(rv), nil
+		case float64:
+			return v <= rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: <=, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	default:
+		return false, fmt.Errorf(
+			"compare: <=, incompatible type to compare, both operands must be numbers, left=%T, right=%T",
+			left,
+			right,
+		)
+	}
+}
+
+func compareGT(left, right operand) (bool, error) {
+
+func compareGT(left, right operand) (bool, error) {
+	switch v := left.(type) {
+	case int:
+		switch rv := right.(type) {
+		case int:
+			return v > rv, nil
+		case float64:
+			return float64(v) > rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: >, incompatible types to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case float64:
+		switch rv := right.(type) {
+		case int:
+			return v > float64(rv), nil
+		case float64:
+			return v > rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: >, incompatible types to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	default:
+		return false, fmt.Errorf(
+			"compare: >, incompatible types to compare, both operands must be numbers, left=%T, right=%T",
+			left,
+			right,
+		)
+	}
+}
+
+func compareGTE(left, right operand) (bool, error) {
+	switch v := left.(type) {
+	case int:
+		switch rv := right.(type) {
+		case int:
+			return v >= rv, nil
+		case float64:
+			return float64(v) >= rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: >=, incompatible types to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	case float64:
+		switch rv := right.(type) {
+		case int:
+			return v >= float64(rv), nil
+		case float64:
+			return v >= rv, nil
+		default:
+			return false, fmt.Errorf(
+				"compare: >=, incompatible types to compare, both operands must be numbers, left=%T, right=%T",
+				left,
+				right,
+			)
+		}
+	default:
+		return false, fmt.Errorf(
+			"compare: >=, incompatible types to compare, both operands must be numbers, left=%T, right=%T",
+			left,
+			right,
+		)
+	}
+}
+
+type logical func(left, right operand) (bool, error)
+
+func logicalAND(left, right operand) (bool, error) {
+	return left.(bool) && right.(bool), nil
+}
+
+func logicalOR(left, right operand) (bool, error) {
+	return left.(bool) || right.(bool), nil
+}
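A minimal sketch (not part of this patch) of the coercion rule the comparators above implement: a mixed int/float64 comparison widens the int operand to float64 first. The TODO comments in the patch flag the precision caveat, reproduced here with values around 2^53 where the widening stops being exact:

package main

import "fmt"

func main() {
	// Mixed-type comparison mirrors compareEQ: the int side is widened to float64.
	fmt.Println(float64(2) == 2.0) // true

	// The caveat behind the TODO comments: float64 cannot represent every
	// integer above 2^53, so distinct integers can compare equal once widened.
	var a, b int64 = 1<<53 + 1, 1 << 53
	fmt.Println(a != b, float64(a) == float64(b)) // true true
}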
diff --git a/x-pack/agent/pkg/boolexp/expression.go b/x-pack/agent/pkg/boolexp/expression.go
new file mode 100644
index 00000000000..c7c5f12a164
--- /dev/null
+++ b/x-pack/agent/pkg/boolexp/expression.go
@@ -0,0 +1,76 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package boolexp
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/antlr/antlr4/runtime/Go/antlr"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/boolexp/parser"
+)
+
+// VarStore is the interface to implement when you want the expression engine to be able to fetch
+// the value of a variable. Variables are defined using the field reference syntax, like
+// this: `%{[hello.var]}`.
+type VarStore interface {
+	// Lookup looks up the value of a variable in the store; the method receives the name of
+	// the variable, like this:
+	//
+	// %{[hello.var]} => hello.var
+	Lookup(string) (interface{}, bool)
+}
+
+// Errors
+var (
+	ErrEmptyExpression = errors.New("expression must not be an empty string")
+)
+
+// Expression parses a boolean expression into a tree and allows evaluating the expression.
+type Expression struct {
+	expression string
+	tree       antlr.ParseTree
+	methodsReg *MethodsReg
+	vars       VarStore
+}
+
+// Eval evaluates the expression using a visitor and the provided methods registry; it returns the
+// boolean result or any evaluation errors.
+func (e *Expression) Eval(store VarStore) (result bool, err error) {
+	// Antlr can panic on errors so we have to recover somehow.
+	defer func() {
+		r := recover()
+		if r != nil {
+			err = fmt.Errorf("error while evaluating the expression %s, error %+v", e.expression, r)
+		}
+	}()
+
+	visitor := &expVisitor{methodsReg: e.methodsReg, vars: store}
+	r := visitor.Visit(e.tree)
+
+	if visitor.err != nil {
+		return false, visitor.err
+	}
+
+	return r.(bool), nil
+}
+
+// New creates a new boolean expression parser; it will return an error if the expression is invalid.
+func New(expression string, methods *MethodsReg) (*Expression, error) {
+	if len(expression) == 0 {
+		return nil, ErrEmptyExpression
+	}
+
+	input := antlr.NewInputStream(expression)
+	lexer := parser.NewBoolexpLexer(input)
+	lexer.RemoveErrorListeners()
+	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+	p := parser.NewBoolexpParser(tokens)
+	p.RemoveErrorListeners()
+	tree := p.ExpList()
+
+	return &Expression{expression: expression, tree: tree, methodsReg: methods}, nil
+}
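For illustration, a hedged usage sketch of the API defined above, together with the methods registry from methods.go just below. The contains method, the host.name variable, and the expression text are invented for this sketch (the package ships no such method), and the expression syntax is assumed to follow the generated grammar later in this patch:

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/elastic/beats/v7/x-pack/agent/pkg/boolexp"
)

// mapStore is a toy VarStore backed by a map, so the variable reference
// %{[host.name]} resolves through Lookup("host.name").
type mapStore map[string]interface{}

func (m mapStore) Lookup(name string) (interface{}, bool) {
	v, ok := m[name]
	return v, ok
}

func main() {
	reg := boolexp.NewMethodsReg()

	// contains is a hypothetical method registered for this sketch only.
	reg.MustRegister("contains", func(args []interface{}) (interface{}, error) {
		if len(args) != 2 {
			return nil, errors.New("contains expects 2 arguments")
		}
		s, ok1 := args[0].(string)
		sub, ok2 := args[1].(string)
		if !ok1 || !ok2 {
			return nil, errors.New("contains expects string arguments")
		}
		return strings.Contains(s, sub), nil
	})

	exp, err := boolexp.New("contains(%{[host.name]}, 'web') == true", reg)
	if err != nil {
		panic(err)
	}

	match, err := exp.Eval(mapStore{"host.name": "web-01"})
	fmt.Println(match, err) // expected: true <nil>
}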
diff --git a/x-pack/agent/pkg/boolexp/methods.go b/x-pack/agent/pkg/boolexp/methods.go
new file mode 100644
index 00000000000..eada71e1bde
--- /dev/null
+++ b/x-pack/agent/pkg/boolexp/methods.go
@@ -0,0 +1,58 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package boolexp
+
+import "fmt"
+
+// CallFunc is a function invoked during expression evaluation; the function is responsible
+// for doing the type conversion and for checking the arity of the call.
+type CallFunc func(args []interface{}) (interface{}, error)
+
+// Method encapsulates a method.
+type Method struct {
+	Name string
+	Func CallFunc
+}
+
+// MethodsReg is the registry of methods; when the evaluation reaches a function call we
+// look up the function in the registry. If the method is found it will be executed,
+// otherwise the evaluation will fail.
+//
+// NOTE: Defined methods must have a unique name and capitalization is important.
+type MethodsReg struct {
+	methods map[string]Method
+}
+
+// Register registers a new method; it will return an error if a method with the same
+// name already exists in the registry.
+func (m *MethodsReg) Register(name string, f CallFunc) error {
+	_, ok := m.methods[name]
+	if ok {
+		return fmt.Errorf("method %s already exists", name)
+	}
+	m.methods[name] = Method{Name: name, Func: f}
+	return nil
+}
+
+// MustRegister registers a new method and will panic on any error.
+func (m *MethodsReg) MustRegister(name string, f CallFunc) {
+	err := m.Register(name, f)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Lookup searches for a method by name and returns it; the boolean is false if the method is not found.
+//
+// NOTE: When looking up methods, name capitalization is important.
+func (m *MethodsReg) Lookup(name string) (Method, bool) {
+	v, ok := m.methods[name]
+	return v, ok
+}
+
+// NewMethodsReg returns a new methods registry.
+func NewMethodsReg() *MethodsReg {
+	return &MethodsReg{methods: make(map[string]Method)}
+}
diff --git a/x-pack/agent/pkg/boolexp/parser/Boolexp.interp b/x-pack/agent/pkg/boolexp/parser/Boolexp.interp
new file mode 100644
index 00000000000..b3a5b7da1f5
--- /dev/null
+++ b/x-pack/agent/pkg/boolexp/parser/Boolexp.interp
@@ -0,0 +1,55 @@
+token literal names:
+null
+','
+'=='
+'!='
+'>'
+'<'
+'>='
+'<='
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+'('
+')'
+
+token symbolic names:
+null
+null
+EQ
+NEQ
+GT
+LT
+GTE
+LTE
+AND
+OR
+TRUE
+FALSE
+FLOAT
+NUMBER
+WHITESPACE
+NOT
+VARIABLE
+METHODNAME
+TEXT
+LPAR
+RPAR
+
+rule names:
+expList
+exp
+boolean
+arguments
+
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 22, 73, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 26, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 32, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 58, 10, 3, 12, 3, 14, 3, 61, 11, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 7, 5, 68, 10, 5, 12, 5, 14, 5, 71, 11, 5, 3, 5, 2, 3, 4, 6, 2, 4, 6, 8, 2, 3, 3, 2, 12, 13, 2, 85, 2, 10, 3, 2, 2, 2, 4, 31, 3, 2, 2, 2, 6, 62, 3, 2, 2, 2, 8, 64, 3, 2, 2, 2, 10, 11, 5, 4, 3, 2, 11, 12, 7, 2, 2, 3, 12, 3, 3, 2, 2, 2, 13, 14, 8, 3, 1, 2, 14, 15, 7, 21, 2, 2, 15, 16, 5, 4, 3, 2, 16, 17, 7, 22, 2, 2, 17, 32, 3, 2, 2, 2, 18, 19, 7, 17, 2, 2, 19, 32, 5, 4, 3, 17, 20, 32, 5, 6, 4, 2, 21, 32, 7, 18, 2, 2, 22, 23, 7, 19, 2, 2, 23, 25, 7, 21, 2, 2, 24, 26, 5, 8, 5, 2, 25, 24, 3, 2, 2, 2, 25, 26, 3, 2, 2, 2, 26, 27, 3, 2, 2, 2, 27, 32, 7, 22, 2, 2, 28, 32, 7, 20, 2, 2, 29, 32, 7, 14, 2, 2, 30, 32, 7, 15, 2, 2, 31, 13, 3, 2, 2, 2, 31, 18, 3, 2, 2, 2, 31, 20, 3, 2, 2, 2, 31, 21, 3, 2, 2, 2, 31, 22, 3, 2, 2, 2, 31, 28, 3, 2, 2, 2, 31, 29, 3, 2, 2, 2, 31, 30, 3, 2, 2, 2, 32, 59, 3, 2, 2, 2, 33, 34, 12, 16, 2, 2, 34, 35, 7, 4, 2, 2, 35, 58, 5, 4, 3, 17, 36, 37, 12, 15, 2, 2, 37, 38, 7, 5, 2, 2, 38, 58, 5, 4, 3, 16, 39, 40, 12, 14, 2, 2, 40, 41, 7, 9, 2, 2, 41, 58, 5, 4, 3, 15, 42, 43, 12, 13, 2, 2, 43, 44, 7, 8, 2, 2, 44, 58, 5, 4, 3, 14, 45, 46, 12, 12, 2, 2, 46, 47, 7, 7, 2, 2, 47, 58, 5, 4, 3, 13, 48, 49, 12, 11, 2, 2, 49, 50, 7, 6, 2, 2, 50, 58, 5, 4, 3, 12, 51, 52, 12, 10, 2, 2, 52, 53, 7, 10, 2, 2, 53, 58, 5, 4, 3, 11, 54, 55, 12, 9, 2, 2, 55, 56, 7, 11, 2, 2, 56, 58, 5, 4, 3, 10, 57, 33, 3, 2, 2, 2, 57, 36, 3, 2, 2, 2, 57, 39, 3, 2, 2, 2, 57, 42, 3, 2, 2, 2, 57, 45, 3, 2, 2, 2, 57, 48, 3, 2, 2, 2, 57, 51, 3, 2, 2, 2, 57, 54, 3, 2, 2, 2, 58, 61, 3, 2, 2, 2, 59, 57, 3, 2, 2, 2, 59, 60, 3, 2, 2, 2, 60, 5, 3, 2, 2, 2, 61, 59, 3, 2, 2, 2, 62, 63, 9, 2, 2, 2, 63, 7, 3, 2, 2, 2, 64, 69, 5, 4, 3, 2, 65, 66, 7, 3, 2, 2, 66, 68, 5, 4, 3, 2, 67, 65, 3, 2, 2, 2, 68, 71, 3, 2, 2, 2, 69, 67, 3, 2, 2, 2, 69, 70, 3, 2, 2, 2, 70, 9, 3, 2, 2, 2, 71, 69, 3, 2, 2, 2, 7, 25, 31, 57, 59, 69]
\ No newline at end of file
diff --git a/x-pack/agent/pkg/boolexp/parser/Boolexp.tokens b/x-pack/agent/pkg/boolexp/parser/Boolexp.tokens
new file mode 100644
index 00000000000..6892b3a80c1
--- /dev/null
+++ b/x-pack/agent/pkg/boolexp/parser/Boolexp.tokens
@@ -0,0 +1,29 @@
+T__0=1
+EQ=2
+NEQ=3
+GT=4
+LT=5
+GTE=6
+LTE=7
+AND=8
+OR=9
+TRUE=10
+FALSE=11
+FLOAT=12
+NUMBER=13
+WHITESPACE=14
+NOT=15
+VARIABLE=16
+METHODNAME=17
+TEXT=18
+LPAR=19
+RPAR=20
+','=1
+'=='=2
+'!='=3
+'>'=4
+'<'=5
+'>='=6
+'<='=7
+'('=19
+')'=20
diff --git
a/x-pack/agent/pkg/boolexp/parser/BoolexpLexer.interp b/x-pack/agent/pkg/boolexp/parser/BoolexpLexer.interp new file mode 100644 index 00000000000..11612b59eb9 --- /dev/null +++ b/x-pack/agent/pkg/boolexp/parser/BoolexpLexer.interp @@ -0,0 +1,79 @@ +token literal names: +null +',' +'==' +'!=' +'>' +'<' +'>=' +'<=' +null +null +null +null +null +null +null +null +null +null +null +'(' +')' + +token symbolic names: +null +null +EQ +NEQ +GT +LT +GTE +LTE +AND +OR +TRUE +FALSE +FLOAT +NUMBER +WHITESPACE +NOT +VARIABLE +METHODNAME +TEXT +LPAR +RPAR + +rule names: +T__0 +EQ +NEQ +GT +LT +GTE +LTE +AND +OR +TRUE +FALSE +FLOAT +NUMBER +WHITESPACE +NOT +VARIABLE +METHODNAME +TEXT +LPAR +RPAR +BEGIN_VARIABLE +END_VARIABLE + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 22, 183, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 74, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 5, 10, 82, 10, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 5, 11, 92, 10, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 5, 12, 104, 10, 12, 3, 13, 6, 13, 107, 10, 13, 13, 13, 14, 13, 108, 3, 13, 3, 13, 6, 13, 113, 10, 13, 13, 13, 14, 13, 114, 3, 14, 6, 14, 118, 10, 14, 13, 14, 14, 14, 119, 3, 15, 6, 15, 123, 10, 15, 13, 15, 14, 15, 124, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 136, 10, 16, 3, 17, 3, 17, 6, 17, 140, 10, 17, 13, 17, 14, 17, 141, 3, 17, 3, 17, 6, 17, 146, 10, 17, 13, 17, 14, 17, 147, 7, 17, 150, 10, 17, 12, 17, 14, 17, 153, 11, 17, 3, 17, 3, 17, 3, 18, 3, 18, 7, 18, 159, 10, 18, 12, 18, 14, 18, 162, 11, 18, 3, 19, 3, 19, 7, 19, 166, 10, 19, 12, 19, 14, 19, 169, 11, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 2, 2, 24, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 2, 45, 2, 3, 2, 9, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 7, 2, 48, 48, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 92, 92, 97, 97, 99, 124, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 2, 197, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 3, 47, 3, 2, 2, 2, 5, 49, 3, 2, 2, 2, 7, 52, 3, 2, 2, 2, 9, 55, 3, 2, 2, 2, 11, 57, 3, 2, 2, 2, 13, 59, 3, 2, 2, 2, 15, 62, 3, 2, 2, 2, 17, 73, 3, 2, 2, 2, 19, 81, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 103, 3, 2, 2, 2, 25, 106, 3, 2, 2, 2, 27, 117, 3, 2, 2, 2, 29, 122, 3, 2, 2, 2, 31, 135, 3, 2, 2, 2, 33, 137, 3, 2, 2, 2, 35, 156, 3, 2, 2, 2, 37, 163, 3, 2, 2, 2, 39, 172, 3, 2, 2, 2, 41, 174, 3, 2, 2, 2, 43, 176, 3, 2, 2, 2, 45, 180, 3, 2, 2, 2, 47, 48, 7, 46, 2, 2, 48, 4, 3, 2, 2, 2, 49, 50, 7, 63, 2, 2, 
50, 51, 7, 63, 2, 2, 51, 6, 3, 2, 2, 2, 52, 53, 7, 35, 2, 2, 53, 54, 7, 63, 2, 2, 54, 8, 3, 2, 2, 2, 55, 56, 7, 64, 2, 2, 56, 10, 3, 2, 2, 2, 57, 58, 7, 62, 2, 2, 58, 12, 3, 2, 2, 2, 59, 60, 7, 64, 2, 2, 60, 61, 7, 63, 2, 2, 61, 14, 3, 2, 2, 2, 62, 63, 7, 62, 2, 2, 63, 64, 7, 63, 2, 2, 64, 16, 3, 2, 2, 2, 65, 66, 7, 99, 2, 2, 66, 67, 7, 112, 2, 2, 67, 74, 7, 102, 2, 2, 68, 69, 7, 67, 2, 2, 69, 70, 7, 80, 2, 2, 70, 74, 7, 70, 2, 2, 71, 72, 7, 40, 2, 2, 72, 74, 7, 40, 2, 2, 73, 65, 3, 2, 2, 2, 73, 68, 3, 2, 2, 2, 73, 71, 3, 2, 2, 2, 74, 18, 3, 2, 2, 2, 75, 76, 7, 113, 2, 2, 76, 82, 7, 116, 2, 2, 77, 78, 7, 81, 2, 2, 78, 82, 7, 84, 2, 2, 79, 80, 7, 126, 2, 2, 80, 82, 7, 126, 2, 2, 81, 75, 3, 2, 2, 2, 81, 77, 3, 2, 2, 2, 81, 79, 3, 2, 2, 2, 82, 20, 3, 2, 2, 2, 83, 84, 7, 118, 2, 2, 84, 85, 7, 116, 2, 2, 85, 86, 7, 119, 2, 2, 86, 92, 7, 103, 2, 2, 87, 88, 7, 86, 2, 2, 88, 89, 7, 84, 2, 2, 89, 90, 7, 87, 2, 2, 90, 92, 7, 71, 2, 2, 91, 83, 3, 2, 2, 2, 91, 87, 3, 2, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 104, 2, 2, 94, 95, 7, 99, 2, 2, 95, 96, 7, 110, 2, 2, 96, 97, 7, 117, 2, 2, 97, 104, 7, 103, 2, 2, 98, 99, 7, 72, 2, 2, 99, 100, 7, 67, 2, 2, 100, 101, 7, 78, 2, 2, 101, 102, 7, 85, 2, 2, 102, 104, 7, 71, 2, 2, 103, 93, 3, 2, 2, 2, 103, 98, 3, 2, 2, 2, 104, 24, 3, 2, 2, 2, 105, 107, 9, 2, 2, 2, 106, 105, 3, 2, 2, 2, 107, 108, 3, 2, 2, 2, 108, 106, 3, 2, 2, 2, 108, 109, 3, 2, 2, 2, 109, 110, 3, 2, 2, 2, 110, 112, 7, 48, 2, 2, 111, 113, 9, 2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 114, 3, 2, 2, 2, 114, 112, 3, 2, 2, 2, 114, 115, 3, 2, 2, 2, 115, 26, 3, 2, 2, 2, 116, 118, 9, 2, 2, 2, 117, 116, 3, 2, 2, 2, 118, 119, 3, 2, 2, 2, 119, 117, 3, 2, 2, 2, 119, 120, 3, 2, 2, 2, 120, 28, 3, 2, 2, 2, 121, 123, 9, 3, 2, 2, 122, 121, 3, 2, 2, 2, 123, 124, 3, 2, 2, 2, 124, 122, 3, 2, 2, 2, 124, 125, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126, 127, 8, 15, 2, 2, 127, 30, 3, 2, 2, 2, 128, 129, 7, 80, 2, 2, 129, 130, 7, 81, 2, 2, 130, 136, 7, 86, 2, 2, 131, 136, 7, 35, 2, 2, 132, 133, 7, 112, 2, 2, 133, 134, 7, 113, 2, 2, 134, 136, 7, 118, 2, 2, 135, 128, 3, 2, 2, 2, 135, 131, 3, 2, 2, 2, 135, 132, 3, 2, 2, 2, 136, 32, 3, 2, 2, 2, 137, 139, 5, 43, 22, 2, 138, 140, 9, 4, 2, 2, 139, 138, 3, 2, 2, 2, 140, 141, 3, 2, 2, 2, 141, 139, 3, 2, 2, 2, 141, 142, 3, 2, 2, 2, 142, 151, 3, 2, 2, 2, 143, 145, 7, 48, 2, 2, 144, 146, 9, 5, 2, 2, 145, 144, 3, 2, 2, 2, 146, 147, 3, 2, 2, 2, 147, 145, 3, 2, 2, 2, 147, 148, 3, 2, 2, 2, 148, 150, 3, 2, 2, 2, 149, 143, 3, 2, 2, 2, 150, 153, 3, 2, 2, 2, 151, 149, 3, 2, 2, 2, 151, 152, 3, 2, 2, 2, 152, 154, 3, 2, 2, 2, 153, 151, 3, 2, 2, 2, 154, 155, 5, 45, 23, 2, 155, 34, 3, 2, 2, 2, 156, 160, 9, 6, 2, 2, 157, 159, 9, 7, 2, 2, 158, 157, 3, 2, 2, 2, 159, 162, 3, 2, 2, 2, 160, 158, 3, 2, 2, 2, 160, 161, 3, 2, 2, 2, 161, 36, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 163, 167, 7, 41, 2, 2, 164, 166, 10, 8, 2, 2, 165, 164, 3, 2, 2, 2, 166, 169, 3, 2, 2, 2, 167, 165, 3, 2, 2, 2, 167, 168, 3, 2, 2, 2, 168, 170, 3, 2, 2, 2, 169, 167, 3, 2, 2, 2, 170, 171, 7, 41, 2, 2, 171, 38, 3, 2, 2, 2, 172, 173, 7, 42, 2, 2, 173, 40, 3, 2, 2, 2, 174, 175, 7, 43, 2, 2, 175, 42, 3, 2, 2, 2, 176, 177, 7, 39, 2, 2, 177, 178, 7, 125, 2, 2, 178, 179, 7, 93, 2, 2, 179, 44, 3, 2, 2, 2, 180, 181, 7, 95, 2, 2, 181, 182, 7, 127, 2, 2, 182, 46, 3, 2, 2, 2, 17, 2, 73, 81, 91, 103, 108, 114, 119, 124, 135, 141, 147, 151, 160, 167, 3, 8, 2, 2] \ No newline at end of file diff --git a/x-pack/agent/pkg/boolexp/parser/BoolexpLexer.tokens b/x-pack/agent/pkg/boolexp/parser/BoolexpLexer.tokens new file mode 100644 index 00000000000..6892b3a80c1 --- 
/dev/null +++ b/x-pack/agent/pkg/boolexp/parser/BoolexpLexer.tokens @@ -0,0 +1,29 @@ +T__0=1 +EQ=2 +NEQ=3 +GT=4 +LT=5 +GTE=6 +LTE=7 +AND=8 +OR=9 +TRUE=10 +FALSE=11 +FLOAT=12 +NUMBER=13 +WHITESPACE=14 +NOT=15 +VARIABLE=16 +METHODNAME=17 +TEXT=18 +LPAR=19 +RPAR=20 +','=1 +'=='=2 +'!='=3 +'>'=4 +'<'=5 +'>='=6 +'<='=7 +'('=19 +')'=20 diff --git a/x-pack/agent/pkg/boolexp/parser/boolexp_base_listener.go b/x-pack/agent/pkg/boolexp/parser/boolexp_base_listener.go new file mode 100644 index 00000000000..3956df34c1f --- /dev/null +++ b/x-pack/agent/pkg/boolexp/parser/boolexp_base_listener.go @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated from Boolexp.g4 by ANTLR 4.7.2. DO NOT EDIT. + +package parser // Boolexp + +import "github.com/antlr/antlr4/runtime/Go/antlr" + +// BaseBoolexpListener is a complete listener for a parse tree produced by BoolexpParser. +type BaseBoolexpListener struct{} + +var _ BoolexpListener = &BaseBoolexpListener{} + +// VisitTerminal is called when a terminal node is visited. +func (s *BaseBoolexpListener) VisitTerminal(node antlr.TerminalNode) {} + +// VisitErrorNode is called when an error node is visited. +func (s *BaseBoolexpListener) VisitErrorNode(node antlr.ErrorNode) {} + +// EnterEveryRule is called when any rule is entered. +func (s *BaseBoolexpListener) EnterEveryRule(ctx antlr.ParserRuleContext) {} + +// ExitEveryRule is called when any rule is exited. +func (s *BaseBoolexpListener) ExitEveryRule(ctx antlr.ParserRuleContext) {} + +// EnterExpList is called when production expList is entered. +func (s *BaseBoolexpListener) EnterExpList(ctx *ExpListContext) {} + +// ExitExpList is called when production expList is exited. +func (s *BaseBoolexpListener) ExitExpList(ctx *ExpListContext) {} + +// EnterExpArithmeticNEQ is called when production ExpArithmeticNEQ is entered. +func (s *BaseBoolexpListener) EnterExpArithmeticNEQ(ctx *ExpArithmeticNEQContext) {} + +// ExitExpArithmeticNEQ is called when production ExpArithmeticNEQ is exited. +func (s *BaseBoolexpListener) ExitExpArithmeticNEQ(ctx *ExpArithmeticNEQContext) {} + +// EnterExpArithmeticEQ is called when production ExpArithmeticEQ is entered. +func (s *BaseBoolexpListener) EnterExpArithmeticEQ(ctx *ExpArithmeticEQContext) {} + +// ExitExpArithmeticEQ is called when production ExpArithmeticEQ is exited. +func (s *BaseBoolexpListener) ExitExpArithmeticEQ(ctx *ExpArithmeticEQContext) {} + +// EnterExpArithmeticGTE is called when production ExpArithmeticGTE is entered. +func (s *BaseBoolexpListener) EnterExpArithmeticGTE(ctx *ExpArithmeticGTEContext) {} + +// ExitExpArithmeticGTE is called when production ExpArithmeticGTE is exited. +func (s *BaseBoolexpListener) ExitExpArithmeticGTE(ctx *ExpArithmeticGTEContext) {} + +// EnterExpArithmeticLTE is called when production ExpArithmeticLTE is entered. +func (s *BaseBoolexpListener) EnterExpArithmeticLTE(ctx *ExpArithmeticLTEContext) {} + +// ExitExpArithmeticLTE is called when production ExpArithmeticLTE is exited. +func (s *BaseBoolexpListener) ExitExpArithmeticLTE(ctx *ExpArithmeticLTEContext) {} + +// EnterExpArithmeticGT is called when production ExpArithmeticGT is entered. +func (s *BaseBoolexpListener) EnterExpArithmeticGT(ctx *ExpArithmeticGTContext) {} + +// ExitExpArithmeticGT is called when production ExpArithmeticGT is exited. 
+func (s *BaseBoolexpListener) ExitExpArithmeticGT(ctx *ExpArithmeticGTContext) {} + +// EnterExpText is called when production ExpText is entered. +func (s *BaseBoolexpListener) EnterExpText(ctx *ExpTextContext) {} + +// ExitExpText is called when production ExpText is exited. +func (s *BaseBoolexpListener) ExitExpText(ctx *ExpTextContext) {} + +// EnterExpNumber is called when production ExpNumber is entered. +func (s *BaseBoolexpListener) EnterExpNumber(ctx *ExpNumberContext) {} + +// ExitExpNumber is called when production ExpNumber is exited. +func (s *BaseBoolexpListener) ExitExpNumber(ctx *ExpNumberContext) {} + +// EnterExpLogicalAnd is called when production ExpLogicalAnd is entered. +func (s *BaseBoolexpListener) EnterExpLogicalAnd(ctx *ExpLogicalAndContext) {} + +// ExitExpLogicalAnd is called when production ExpLogicalAnd is exited. +func (s *BaseBoolexpListener) ExitExpLogicalAnd(ctx *ExpLogicalAndContext) {} + +// EnterExpLogicalOR is called when production ExpLogicalOR is entered. +func (s *BaseBoolexpListener) EnterExpLogicalOR(ctx *ExpLogicalORContext) {} + +// ExitExpLogicalOR is called when production ExpLogicalOR is exited. +func (s *BaseBoolexpListener) ExitExpLogicalOR(ctx *ExpLogicalORContext) {} + +// EnterExpFloat is called when production ExpFloat is entered. +func (s *BaseBoolexpListener) EnterExpFloat(ctx *ExpFloatContext) {} + +// ExitExpFloat is called when production ExpFloat is exited. +func (s *BaseBoolexpListener) ExitExpFloat(ctx *ExpFloatContext) {} + +// EnterExpVariable is called when production ExpVariable is entered. +func (s *BaseBoolexpListener) EnterExpVariable(ctx *ExpVariableContext) {} + +// ExitExpVariable is called when production ExpVariable is exited. +func (s *BaseBoolexpListener) ExitExpVariable(ctx *ExpVariableContext) {} + +// EnterExpNot is called when production ExpNot is entered. +func (s *BaseBoolexpListener) EnterExpNot(ctx *ExpNotContext) {} + +// ExitExpNot is called when production ExpNot is exited. +func (s *BaseBoolexpListener) ExitExpNot(ctx *ExpNotContext) {} + +// EnterExpInParen is called when production ExpInParen is entered. +func (s *BaseBoolexpListener) EnterExpInParen(ctx *ExpInParenContext) {} + +// ExitExpInParen is called when production ExpInParen is exited. +func (s *BaseBoolexpListener) ExitExpInParen(ctx *ExpInParenContext) {} + +// EnterExpBoolean is called when production ExpBoolean is entered. +func (s *BaseBoolexpListener) EnterExpBoolean(ctx *ExpBooleanContext) {} + +// ExitExpBoolean is called when production ExpBoolean is exited. +func (s *BaseBoolexpListener) ExitExpBoolean(ctx *ExpBooleanContext) {} + +// EnterExpFunction is called when production ExpFunction is entered. +func (s *BaseBoolexpListener) EnterExpFunction(ctx *ExpFunctionContext) {} + +// ExitExpFunction is called when production ExpFunction is exited. +func (s *BaseBoolexpListener) ExitExpFunction(ctx *ExpFunctionContext) {} + +// EnterExpArithmeticLT is called when production ExpArithmeticLT is entered. +func (s *BaseBoolexpListener) EnterExpArithmeticLT(ctx *ExpArithmeticLTContext) {} + +// ExitExpArithmeticLT is called when production ExpArithmeticLT is exited. +func (s *BaseBoolexpListener) ExitExpArithmeticLT(ctx *ExpArithmeticLTContext) {} + +// EnterBoolean is called when production boolean is entered. +func (s *BaseBoolexpListener) EnterBoolean(ctx *BooleanContext) {} + +// ExitBoolean is called when production boolean is exited. 
+func (s *BaseBoolexpListener) ExitBoolean(ctx *BooleanContext) {} + +// EnterArguments is called when production arguments is entered. +func (s *BaseBoolexpListener) EnterArguments(ctx *ArgumentsContext) {} + +// ExitArguments is called when production arguments is exited. +func (s *BaseBoolexpListener) ExitArguments(ctx *ArgumentsContext) {} diff --git a/x-pack/agent/pkg/boolexp/parser/boolexp_base_visitor.go b/x-pack/agent/pkg/boolexp/parser/boolexp_base_visitor.go new file mode 100644 index 00000000000..44adc6d7c10 --- /dev/null +++ b/x-pack/agent/pkg/boolexp/parser/boolexp_base_visitor.go @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated from Boolexp.g4 by ANTLR 4.7.2. DO NOT EDIT. + +package parser // Boolexp + +import "github.com/antlr/antlr4/runtime/Go/antlr" + +type BaseBoolexpVisitor struct { + *antlr.BaseParseTreeVisitor +} + +func (v *BaseBoolexpVisitor) VisitExpList(ctx *ExpListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpArithmeticNEQ(ctx *ExpArithmeticNEQContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpArithmeticEQ(ctx *ExpArithmeticEQContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpArithmeticGTE(ctx *ExpArithmeticGTEContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpArithmeticLTE(ctx *ExpArithmeticLTEContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpArithmeticGT(ctx *ExpArithmeticGTContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpText(ctx *ExpTextContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpNumber(ctx *ExpNumberContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpLogicalAnd(ctx *ExpLogicalAndContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpLogicalOR(ctx *ExpLogicalORContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpFloat(ctx *ExpFloatContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpVariable(ctx *ExpVariableContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpNot(ctx *ExpNotContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpInParen(ctx *ExpInParenContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpBoolean(ctx *ExpBooleanContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpFunction(ctx *ExpFunctionContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitExpArithmeticLT(ctx *ExpArithmeticLTContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitBoolean(ctx *BooleanContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseBoolexpVisitor) VisitArguments(ctx *ArgumentsContext) interface{} { + return v.VisitChildren(ctx) +} diff --git a/x-pack/agent/pkg/boolexp/parser/boolexp_lexer.go b/x-pack/agent/pkg/boolexp/parser/boolexp_lexer.go new file mode 100644 index 00000000000..b89f1dbc517 --- 
/dev/null +++ b/x-pack/agent/pkg/boolexp/parser/boolexp_lexer.go @@ -0,0 +1,191 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated from Boolexp.g4 by ANTLR 4.7.2. DO NOT EDIT. + +package parser + +import ( + "fmt" + "unicode" + + "github.com/antlr/antlr4/runtime/Go/antlr" +) + +// Suppress unused import error +var _ = fmt.Printf +var _ = unicode.IsLetter + +var serializedLexerAtn = []uint16{ + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 22, 183, + 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, + 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, + 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, + 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, + 9, 23, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, + 3, 6, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, + 3, 9, 3, 9, 3, 9, 5, 9, 74, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, + 10, 5, 10, 82, 10, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, + 3, 11, 5, 11, 92, 10, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, + 12, 3, 12, 3, 12, 3, 12, 5, 12, 104, 10, 12, 3, 13, 6, 13, 107, 10, 13, + 13, 13, 14, 13, 108, 3, 13, 3, 13, 6, 13, 113, 10, 13, 13, 13, 14, 13, + 114, 3, 14, 6, 14, 118, 10, 14, 13, 14, 14, 14, 119, 3, 15, 6, 15, 123, + 10, 15, 13, 15, 14, 15, 124, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, + 3, 16, 3, 16, 3, 16, 5, 16, 136, 10, 16, 3, 17, 3, 17, 6, 17, 140, 10, + 17, 13, 17, 14, 17, 141, 3, 17, 3, 17, 6, 17, 146, 10, 17, 13, 17, 14, + 17, 147, 7, 17, 150, 10, 17, 12, 17, 14, 17, 153, 11, 17, 3, 17, 3, 17, + 3, 18, 3, 18, 7, 18, 159, 10, 18, 12, 18, 14, 18, 162, 11, 18, 3, 19, 3, + 19, 7, 19, 166, 10, 19, 12, 19, 14, 19, 169, 11, 19, 3, 19, 3, 19, 3, 20, + 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 2, + 2, 24, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, + 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, + 21, 41, 22, 43, 2, 45, 2, 3, 2, 9, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, + 34, 34, 7, 2, 48, 48, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 92, + 92, 97, 97, 99, 124, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, + 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 2, 197, 2, 3, 3, 2, 2, 2, + 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, + 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, + 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, + 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, + 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 3, 47, + 3, 2, 2, 2, 5, 49, 3, 2, 2, 2, 7, 52, 3, 2, 2, 2, 9, 55, 3, 2, 2, 2, 11, + 57, 3, 2, 2, 2, 13, 59, 3, 2, 2, 2, 15, 62, 3, 2, 2, 2, 17, 73, 3, 2, 2, + 2, 19, 81, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 103, 3, 2, 2, 2, 25, 106, + 3, 2, 2, 2, 27, 117, 3, 2, 2, 2, 29, 122, 3, 2, 2, 2, 31, 135, 3, 2, 2, + 2, 33, 137, 3, 2, 2, 2, 35, 156, 3, 2, 2, 2, 37, 163, 3, 2, 2, 2, 39, 172, + 3, 2, 2, 2, 41, 174, 3, 2, 2, 2, 43, 176, 3, 2, 2, 2, 45, 180, 3, 2, 2, + 2, 47, 48, 7, 46, 2, 2, 48, 4, 3, 2, 2, 2, 49, 50, 7, 63, 2, 2, 50, 51, + 7, 63, 2, 2, 51, 6, 3, 2, 2, 2, 52, 53, 7, 35, 2, 2, 53, 54, 7, 63, 2, + 2, 54, 8, 3, 2, 2, 2, 55, 56, 7, 64, 2, 2, 
56, 10, 3, 2, 2, 2, 57, 58, + 7, 62, 2, 2, 58, 12, 3, 2, 2, 2, 59, 60, 7, 64, 2, 2, 60, 61, 7, 63, 2, + 2, 61, 14, 3, 2, 2, 2, 62, 63, 7, 62, 2, 2, 63, 64, 7, 63, 2, 2, 64, 16, + 3, 2, 2, 2, 65, 66, 7, 99, 2, 2, 66, 67, 7, 112, 2, 2, 67, 74, 7, 102, + 2, 2, 68, 69, 7, 67, 2, 2, 69, 70, 7, 80, 2, 2, 70, 74, 7, 70, 2, 2, 71, + 72, 7, 40, 2, 2, 72, 74, 7, 40, 2, 2, 73, 65, 3, 2, 2, 2, 73, 68, 3, 2, + 2, 2, 73, 71, 3, 2, 2, 2, 74, 18, 3, 2, 2, 2, 75, 76, 7, 113, 2, 2, 76, + 82, 7, 116, 2, 2, 77, 78, 7, 81, 2, 2, 78, 82, 7, 84, 2, 2, 79, 80, 7, + 126, 2, 2, 80, 82, 7, 126, 2, 2, 81, 75, 3, 2, 2, 2, 81, 77, 3, 2, 2, 2, + 81, 79, 3, 2, 2, 2, 82, 20, 3, 2, 2, 2, 83, 84, 7, 118, 2, 2, 84, 85, 7, + 116, 2, 2, 85, 86, 7, 119, 2, 2, 86, 92, 7, 103, 2, 2, 87, 88, 7, 86, 2, + 2, 88, 89, 7, 84, 2, 2, 89, 90, 7, 87, 2, 2, 90, 92, 7, 71, 2, 2, 91, 83, + 3, 2, 2, 2, 91, 87, 3, 2, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 104, 2, + 2, 94, 95, 7, 99, 2, 2, 95, 96, 7, 110, 2, 2, 96, 97, 7, 117, 2, 2, 97, + 104, 7, 103, 2, 2, 98, 99, 7, 72, 2, 2, 99, 100, 7, 67, 2, 2, 100, 101, + 7, 78, 2, 2, 101, 102, 7, 85, 2, 2, 102, 104, 7, 71, 2, 2, 103, 93, 3, + 2, 2, 2, 103, 98, 3, 2, 2, 2, 104, 24, 3, 2, 2, 2, 105, 107, 9, 2, 2, 2, + 106, 105, 3, 2, 2, 2, 107, 108, 3, 2, 2, 2, 108, 106, 3, 2, 2, 2, 108, + 109, 3, 2, 2, 2, 109, 110, 3, 2, 2, 2, 110, 112, 7, 48, 2, 2, 111, 113, + 9, 2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 114, 3, 2, 2, 2, 114, 112, 3, 2, + 2, 2, 114, 115, 3, 2, 2, 2, 115, 26, 3, 2, 2, 2, 116, 118, 9, 2, 2, 2, + 117, 116, 3, 2, 2, 2, 118, 119, 3, 2, 2, 2, 119, 117, 3, 2, 2, 2, 119, + 120, 3, 2, 2, 2, 120, 28, 3, 2, 2, 2, 121, 123, 9, 3, 2, 2, 122, 121, 3, + 2, 2, 2, 123, 124, 3, 2, 2, 2, 124, 122, 3, 2, 2, 2, 124, 125, 3, 2, 2, + 2, 125, 126, 3, 2, 2, 2, 126, 127, 8, 15, 2, 2, 127, 30, 3, 2, 2, 2, 128, + 129, 7, 80, 2, 2, 129, 130, 7, 81, 2, 2, 130, 136, 7, 86, 2, 2, 131, 136, + 7, 35, 2, 2, 132, 133, 7, 112, 2, 2, 133, 134, 7, 113, 2, 2, 134, 136, + 7, 118, 2, 2, 135, 128, 3, 2, 2, 2, 135, 131, 3, 2, 2, 2, 135, 132, 3, + 2, 2, 2, 136, 32, 3, 2, 2, 2, 137, 139, 5, 43, 22, 2, 138, 140, 9, 4, 2, + 2, 139, 138, 3, 2, 2, 2, 140, 141, 3, 2, 2, 2, 141, 139, 3, 2, 2, 2, 141, + 142, 3, 2, 2, 2, 142, 151, 3, 2, 2, 2, 143, 145, 7, 48, 2, 2, 144, 146, + 9, 5, 2, 2, 145, 144, 3, 2, 2, 2, 146, 147, 3, 2, 2, 2, 147, 145, 3, 2, + 2, 2, 147, 148, 3, 2, 2, 2, 148, 150, 3, 2, 2, 2, 149, 143, 3, 2, 2, 2, + 150, 153, 3, 2, 2, 2, 151, 149, 3, 2, 2, 2, 151, 152, 3, 2, 2, 2, 152, + 154, 3, 2, 2, 2, 153, 151, 3, 2, 2, 2, 154, 155, 5, 45, 23, 2, 155, 34, + 3, 2, 2, 2, 156, 160, 9, 6, 2, 2, 157, 159, 9, 7, 2, 2, 158, 157, 3, 2, + 2, 2, 159, 162, 3, 2, 2, 2, 160, 158, 3, 2, 2, 2, 160, 161, 3, 2, 2, 2, + 161, 36, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 163, 167, 7, 41, 2, 2, 164, + 166, 10, 8, 2, 2, 165, 164, 3, 2, 2, 2, 166, 169, 3, 2, 2, 2, 167, 165, + 3, 2, 2, 2, 167, 168, 3, 2, 2, 2, 168, 170, 3, 2, 2, 2, 169, 167, 3, 2, + 2, 2, 170, 171, 7, 41, 2, 2, 171, 38, 3, 2, 2, 2, 172, 173, 7, 42, 2, 2, + 173, 40, 3, 2, 2, 2, 174, 175, 7, 43, 2, 2, 175, 42, 3, 2, 2, 2, 176, 177, + 7, 39, 2, 2, 177, 178, 7, 125, 2, 2, 178, 179, 7, 93, 2, 2, 179, 44, 3, + 2, 2, 2, 180, 181, 7, 95, 2, 2, 181, 182, 7, 127, 2, 2, 182, 46, 3, 2, + 2, 2, 17, 2, 73, 81, 91, 103, 108, 114, 119, 124, 135, 141, 147, 151, 160, + 167, 3, 8, 2, 2, +} + +var lexerDeserializer = antlr.NewATNDeserializer(nil) +var lexerAtn = lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) + +var lexerChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", +} + +var lexerModeNames = 
[]string{ + "DEFAULT_MODE", +} + +var lexerLiteralNames = []string{ + "", "','", "'=='", "'!='", "'>'", "'<'", "'>='", "'<='", "", "", "", "", + "", "", "", "", "", "", "", "'('", "')'", +} + +var lexerSymbolicNames = []string{ + "", "", "EQ", "NEQ", "GT", "LT", "GTE", "LTE", "AND", "OR", "TRUE", "FALSE", + "FLOAT", "NUMBER", "WHITESPACE", "NOT", "VARIABLE", "METHODNAME", "TEXT", + "LPAR", "RPAR", +} + +var lexerRuleNames = []string{ + "T__0", "EQ", "NEQ", "GT", "LT", "GTE", "LTE", "AND", "OR", "TRUE", "FALSE", + "FLOAT", "NUMBER", "WHITESPACE", "NOT", "VARIABLE", "METHODNAME", "TEXT", + "LPAR", "RPAR", "BEGIN_VARIABLE", "END_VARIABLE", +} + +type BoolexpLexer struct { + *antlr.BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var lexerDecisionToDFA = make([]*antlr.DFA, len(lexerAtn.DecisionToState)) + +func init() { + for index, ds := range lexerAtn.DecisionToState { + lexerDecisionToDFA[index] = antlr.NewDFA(ds, index) + } +} + +func NewBoolexpLexer(input antlr.CharStream) *BoolexpLexer { + + l := new(BoolexpLexer) + + l.BaseLexer = antlr.NewBaseLexer(input) + l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache()) + + l.channelNames = lexerChannelNames + l.modeNames = lexerModeNames + l.RuleNames = lexerRuleNames + l.LiteralNames = lexerLiteralNames + l.SymbolicNames = lexerSymbolicNames + l.GrammarFileName = "Boolexp.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// BoolexpLexer tokens. +const ( + BoolexpLexerT__0 = 1 + BoolexpLexerEQ = 2 + BoolexpLexerNEQ = 3 + BoolexpLexerGT = 4 + BoolexpLexerLT = 5 + BoolexpLexerGTE = 6 + BoolexpLexerLTE = 7 + BoolexpLexerAND = 8 + BoolexpLexerOR = 9 + BoolexpLexerTRUE = 10 + BoolexpLexerFALSE = 11 + BoolexpLexerFLOAT = 12 + BoolexpLexerNUMBER = 13 + BoolexpLexerWHITESPACE = 14 + BoolexpLexerNOT = 15 + BoolexpLexerVARIABLE = 16 + BoolexpLexerMETHODNAME = 17 + BoolexpLexerTEXT = 18 + BoolexpLexerLPAR = 19 + BoolexpLexerRPAR = 20 +) diff --git a/x-pack/agent/pkg/boolexp/parser/boolexp_listener.go b/x-pack/agent/pkg/boolexp/parser/boolexp_listener.go new file mode 100644 index 00000000000..12ff6e0fe89 --- /dev/null +++ b/x-pack/agent/pkg/boolexp/parser/boolexp_listener.go @@ -0,0 +1,128 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated from Boolexp.g4 by ANTLR 4.7.2. DO NOT EDIT. + +package parser // Boolexp + +import "github.com/antlr/antlr4/runtime/Go/antlr" + +// BoolexpListener is a complete listener for a parse tree produced by BoolexpParser. +type BoolexpListener interface { + antlr.ParseTreeListener + + // EnterExpList is called when entering the expList production. + EnterExpList(c *ExpListContext) + + // EnterExpArithmeticNEQ is called when entering the ExpArithmeticNEQ production. + EnterExpArithmeticNEQ(c *ExpArithmeticNEQContext) + + // EnterExpArithmeticEQ is called when entering the ExpArithmeticEQ production. + EnterExpArithmeticEQ(c *ExpArithmeticEQContext) + + // EnterExpArithmeticGTE is called when entering the ExpArithmeticGTE production. + EnterExpArithmeticGTE(c *ExpArithmeticGTEContext) + + // EnterExpArithmeticLTE is called when entering the ExpArithmeticLTE production. + EnterExpArithmeticLTE(c *ExpArithmeticLTEContext) + + // EnterExpArithmeticGT is called when entering the ExpArithmeticGT production. 
+ EnterExpArithmeticGT(c *ExpArithmeticGTContext) + + // EnterExpText is called when entering the ExpText production. + EnterExpText(c *ExpTextContext) + + // EnterExpNumber is called when entering the ExpNumber production. + EnterExpNumber(c *ExpNumberContext) + + // EnterExpLogicalAnd is called when entering the ExpLogicalAnd production. + EnterExpLogicalAnd(c *ExpLogicalAndContext) + + // EnterExpLogicalOR is called when entering the ExpLogicalOR production. + EnterExpLogicalOR(c *ExpLogicalORContext) + + // EnterExpFloat is called when entering the ExpFloat production. + EnterExpFloat(c *ExpFloatContext) + + // EnterExpVariable is called when entering the ExpVariable production. + EnterExpVariable(c *ExpVariableContext) + + // EnterExpNot is called when entering the ExpNot production. + EnterExpNot(c *ExpNotContext) + + // EnterExpInParen is called when entering the ExpInParen production. + EnterExpInParen(c *ExpInParenContext) + + // EnterExpBoolean is called when entering the ExpBoolean production. + EnterExpBoolean(c *ExpBooleanContext) + + // EnterExpFunction is called when entering the ExpFunction production. + EnterExpFunction(c *ExpFunctionContext) + + // EnterExpArithmeticLT is called when entering the ExpArithmeticLT production. + EnterExpArithmeticLT(c *ExpArithmeticLTContext) + + // EnterBoolean is called when entering the boolean production. + EnterBoolean(c *BooleanContext) + + // EnterArguments is called when entering the arguments production. + EnterArguments(c *ArgumentsContext) + + // ExitExpList is called when exiting the expList production. + ExitExpList(c *ExpListContext) + + // ExitExpArithmeticNEQ is called when exiting the ExpArithmeticNEQ production. + ExitExpArithmeticNEQ(c *ExpArithmeticNEQContext) + + // ExitExpArithmeticEQ is called when exiting the ExpArithmeticEQ production. + ExitExpArithmeticEQ(c *ExpArithmeticEQContext) + + // ExitExpArithmeticGTE is called when exiting the ExpArithmeticGTE production. + ExitExpArithmeticGTE(c *ExpArithmeticGTEContext) + + // ExitExpArithmeticLTE is called when exiting the ExpArithmeticLTE production. + ExitExpArithmeticLTE(c *ExpArithmeticLTEContext) + + // ExitExpArithmeticGT is called when exiting the ExpArithmeticGT production. + ExitExpArithmeticGT(c *ExpArithmeticGTContext) + + // ExitExpText is called when exiting the ExpText production. + ExitExpText(c *ExpTextContext) + + // ExitExpNumber is called when exiting the ExpNumber production. + ExitExpNumber(c *ExpNumberContext) + + // ExitExpLogicalAnd is called when exiting the ExpLogicalAnd production. + ExitExpLogicalAnd(c *ExpLogicalAndContext) + + // ExitExpLogicalOR is called when exiting the ExpLogicalOR production. + ExitExpLogicalOR(c *ExpLogicalORContext) + + // ExitExpFloat is called when exiting the ExpFloat production. + ExitExpFloat(c *ExpFloatContext) + + // ExitExpVariable is called when exiting the ExpVariable production. + ExitExpVariable(c *ExpVariableContext) + + // ExitExpNot is called when exiting the ExpNot production. + ExitExpNot(c *ExpNotContext) + + // ExitExpInParen is called when exiting the ExpInParen production. + ExitExpInParen(c *ExpInParenContext) + + // ExitExpBoolean is called when exiting the ExpBoolean production. + ExitExpBoolean(c *ExpBooleanContext) + + // ExitExpFunction is called when exiting the ExpFunction production. + ExitExpFunction(c *ExpFunctionContext) + + // ExitExpArithmeticLT is called when exiting the ExpArithmeticLT production. 
+ ExitExpArithmeticLT(c *ExpArithmeticLTContext) + + // ExitBoolean is called when exiting the boolean production. + ExitBoolean(c *BooleanContext) + + // ExitArguments is called when exiting the arguments production. + ExitArguments(c *ArgumentsContext) +} diff --git a/x-pack/agent/pkg/boolexp/parser/boolexp_parser.go b/x-pack/agent/pkg/boolexp/parser/boolexp_parser.go new file mode 100644 index 00000000000..2805c35e9e5 --- /dev/null +++ b/x-pack/agent/pkg/boolexp/parser/boolexp_parser.go @@ -0,0 +1,1952 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated from Boolexp.g4 by ANTLR 4.7.2. DO NOT EDIT. + +package parser // Boolexp + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/antlr/antlr4/runtime/Go/antlr" +) + +// Suppress unused import errors +var _ = fmt.Printf +var _ = reflect.Copy +var _ = strconv.Itoa + +var parserATN = []uint16{ + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 22, 73, 4, + 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 3, 2, 3, 2, 3, 2, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 26, + 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 32, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 58, 10, 3, 12, 3, + 14, 3, 61, 11, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 7, 5, 68, 10, 5, 12, 5, + 14, 5, 71, 11, 5, 3, 5, 2, 3, 4, 6, 2, 4, 6, 8, 2, 3, 3, 2, 12, 13, 2, + 85, 2, 10, 3, 2, 2, 2, 4, 31, 3, 2, 2, 2, 6, 62, 3, 2, 2, 2, 8, 64, 3, + 2, 2, 2, 10, 11, 5, 4, 3, 2, 11, 12, 7, 2, 2, 3, 12, 3, 3, 2, 2, 2, 13, + 14, 8, 3, 1, 2, 14, 15, 7, 21, 2, 2, 15, 16, 5, 4, 3, 2, 16, 17, 7, 22, + 2, 2, 17, 32, 3, 2, 2, 2, 18, 19, 7, 17, 2, 2, 19, 32, 5, 4, 3, 17, 20, + 32, 5, 6, 4, 2, 21, 32, 7, 18, 2, 2, 22, 23, 7, 19, 2, 2, 23, 25, 7, 21, + 2, 2, 24, 26, 5, 8, 5, 2, 25, 24, 3, 2, 2, 2, 25, 26, 3, 2, 2, 2, 26, 27, + 3, 2, 2, 2, 27, 32, 7, 22, 2, 2, 28, 32, 7, 20, 2, 2, 29, 32, 7, 14, 2, + 2, 30, 32, 7, 15, 2, 2, 31, 13, 3, 2, 2, 2, 31, 18, 3, 2, 2, 2, 31, 20, + 3, 2, 2, 2, 31, 21, 3, 2, 2, 2, 31, 22, 3, 2, 2, 2, 31, 28, 3, 2, 2, 2, + 31, 29, 3, 2, 2, 2, 31, 30, 3, 2, 2, 2, 32, 59, 3, 2, 2, 2, 33, 34, 12, + 16, 2, 2, 34, 35, 7, 4, 2, 2, 35, 58, 5, 4, 3, 17, 36, 37, 12, 15, 2, 2, + 37, 38, 7, 5, 2, 2, 38, 58, 5, 4, 3, 16, 39, 40, 12, 14, 2, 2, 40, 41, + 7, 9, 2, 2, 41, 58, 5, 4, 3, 15, 42, 43, 12, 13, 2, 2, 43, 44, 7, 8, 2, + 2, 44, 58, 5, 4, 3, 14, 45, 46, 12, 12, 2, 2, 46, 47, 7, 7, 2, 2, 47, 58, + 5, 4, 3, 13, 48, 49, 12, 11, 2, 2, 49, 50, 7, 6, 2, 2, 50, 58, 5, 4, 3, + 12, 51, 52, 12, 10, 2, 2, 52, 53, 7, 10, 2, 2, 53, 58, 5, 4, 3, 11, 54, + 55, 12, 9, 2, 2, 55, 56, 7, 11, 2, 2, 56, 58, 5, 4, 3, 10, 57, 33, 3, 2, + 2, 2, 57, 36, 3, 2, 2, 2, 57, 39, 3, 2, 2, 2, 57, 42, 3, 2, 2, 2, 57, 45, + 3, 2, 2, 2, 57, 48, 3, 2, 2, 2, 57, 51, 3, 2, 2, 2, 57, 54, 3, 2, 2, 2, + 58, 61, 3, 2, 2, 2, 59, 57, 3, 2, 2, 2, 59, 60, 3, 2, 2, 2, 60, 5, 3, 2, + 2, 2, 61, 59, 3, 2, 2, 2, 62, 63, 9, 2, 2, 2, 63, 7, 3, 2, 2, 2, 64, 69, + 5, 4, 3, 2, 65, 66, 7, 3, 2, 2, 66, 68, 5, 4, 3, 2, 67, 65, 3, 2, 2, 2, + 68, 71, 3, 2, 2, 2, 69, 67, 3, 2, 2, 2, 69, 70, 3, 2, 2, 2, 70, 9, 3, 2, + 2, 2, 71, 69, 3, 2, 2, 2, 7, 25, 31, 57, 59, 69, +} +var deserializer = antlr.NewATNDeserializer(nil) +var deserializedATN = deserializer.DeserializeFromUInt16(parserATN) + +var 
literalNames = []string{ + "", "','", "'=='", "'!='", "'>'", "'<'", "'>='", "'<='", "", "", "", "", + "", "", "", "", "", "", "", "'('", "')'", +} +var symbolicNames = []string{ + "", "", "EQ", "NEQ", "GT", "LT", "GTE", "LTE", "AND", "OR", "TRUE", "FALSE", + "FLOAT", "NUMBER", "WHITESPACE", "NOT", "VARIABLE", "METHODNAME", "TEXT", + "LPAR", "RPAR", +} + +var ruleNames = []string{ + "expList", "exp", "boolean", "arguments", +} +var decisionToDFA = make([]*antlr.DFA, len(deserializedATN.DecisionToState)) + +func init() { + for index, ds := range deserializedATN.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(ds, index) + } +} + +type BoolexpParser struct { + *antlr.BaseParser +} + +func NewBoolexpParser(input antlr.TokenStream) *BoolexpParser { + this := new(BoolexpParser) + + this.BaseParser = antlr.NewBaseParser(input) + + this.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache()) + this.RuleNames = ruleNames + this.LiteralNames = literalNames + this.SymbolicNames = symbolicNames + this.GrammarFileName = "Boolexp.g4" + + return this +} + +// BoolexpParser tokens. +const ( + BoolexpParserEOF = antlr.TokenEOF + BoolexpParserT__0 = 1 + BoolexpParserEQ = 2 + BoolexpParserNEQ = 3 + BoolexpParserGT = 4 + BoolexpParserLT = 5 + BoolexpParserGTE = 6 + BoolexpParserLTE = 7 + BoolexpParserAND = 8 + BoolexpParserOR = 9 + BoolexpParserTRUE = 10 + BoolexpParserFALSE = 11 + BoolexpParserFLOAT = 12 + BoolexpParserNUMBER = 13 + BoolexpParserWHITESPACE = 14 + BoolexpParserNOT = 15 + BoolexpParserVARIABLE = 16 + BoolexpParserMETHODNAME = 17 + BoolexpParserTEXT = 18 + BoolexpParserLPAR = 19 + BoolexpParserRPAR = 20 +) + +// BoolexpParser rules. +const ( + BoolexpParserRULE_expList = 0 + BoolexpParserRULE_exp = 1 + BoolexpParserRULE_boolean = 2 + BoolexpParserRULE_arguments = 3 +) + +// IExpListContext is an interface to support dynamic dispatch. +type IExpListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // IsExpListContext differentiates from other interfaces. 
+ IsExpListContext() +} + +type ExpListContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyExpListContext() *ExpListContext { + var p = new(ExpListContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = BoolexpParserRULE_expList + return p +} + +func (*ExpListContext) IsExpListContext() {} + +func NewExpListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExpListContext { + var p = new(ExpListContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = BoolexpParserRULE_expList + + return p +} + +func (s *ExpListContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExpListContext) Exp() IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpListContext) EOF() antlr.TerminalNode { + return s.GetToken(BoolexpParserEOF, 0) +} + +func (s *ExpListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *ExpListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpList(s) + } +} + +func (s *ExpListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpList(s) + } +} + +func (s *ExpListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpList(s) + + default: + return t.VisitChildren(s) + } +} + +func (p *BoolexpParser) ExpList() (localctx IExpListContext) { + localctx = NewExpListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 0, BoolexpParserRULE_expList) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(8) + p.exp(0) + } + { + p.SetState(9) + p.Match(BoolexpParserEOF) + } + + return localctx +} + +// IExpContext is an interface to support dynamic dispatch. +type IExpContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // IsExpContext differentiates from other interfaces. 
+ IsExpContext() +} + +type ExpContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyExpContext() *ExpContext { + var p = new(ExpContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = BoolexpParserRULE_exp + return p +} + +func (*ExpContext) IsExpContext() {} + +func NewExpContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExpContext { + var p = new(ExpContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = BoolexpParserRULE_exp + + return p +} + +func (s *ExpContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExpContext) CopyFrom(ctx *ExpContext) { + s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +} + +func (s *ExpContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +type ExpArithmeticNEQContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpArithmeticNEQContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpArithmeticNEQContext { + var p = new(ExpArithmeticNEQContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpArithmeticNEQContext) GetLeft() IExpContext { return s.left } + +func (s *ExpArithmeticNEQContext) GetRight() IExpContext { return s.right } + +func (s *ExpArithmeticNEQContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpArithmeticNEQContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpArithmeticNEQContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpArithmeticNEQContext) NEQ() antlr.TerminalNode { + return s.GetToken(BoolexpParserNEQ, 0) +} + +func (s *ExpArithmeticNEQContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpArithmeticNEQContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpArithmeticNEQContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpArithmeticNEQ(s) + } +} + +func (s *ExpArithmeticNEQContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpArithmeticNEQ(s) + } +} + +func (s *ExpArithmeticNEQContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpArithmeticNEQ(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpArithmeticEQContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpArithmeticEQContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpArithmeticEQContext { + var p = new(ExpArithmeticEQContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpArithmeticEQContext) GetLeft() IExpContext { return s.left } + +func (s *ExpArithmeticEQContext) GetRight() IExpContext { return s.right } + +func (s *ExpArithmeticEQContext) SetLeft(v IExpContext) { s.left = v } + +func 
(s *ExpArithmeticEQContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpArithmeticEQContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpArithmeticEQContext) EQ() antlr.TerminalNode { + return s.GetToken(BoolexpParserEQ, 0) +} + +func (s *ExpArithmeticEQContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpArithmeticEQContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpArithmeticEQContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpArithmeticEQ(s) + } +} + +func (s *ExpArithmeticEQContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpArithmeticEQ(s) + } +} + +func (s *ExpArithmeticEQContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpArithmeticEQ(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpArithmeticGTEContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpArithmeticGTEContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpArithmeticGTEContext { + var p = new(ExpArithmeticGTEContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpArithmeticGTEContext) GetLeft() IExpContext { return s.left } + +func (s *ExpArithmeticGTEContext) GetRight() IExpContext { return s.right } + +func (s *ExpArithmeticGTEContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpArithmeticGTEContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpArithmeticGTEContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpArithmeticGTEContext) GTE() antlr.TerminalNode { + return s.GetToken(BoolexpParserGTE, 0) +} + +func (s *ExpArithmeticGTEContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpArithmeticGTEContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpArithmeticGTEContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpArithmeticGTE(s) + } +} + +func (s *ExpArithmeticGTEContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpArithmeticGTE(s) + } +} + +func (s *ExpArithmeticGTEContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpArithmeticGTE(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpArithmeticLTEContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpArithmeticLTEContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpArithmeticLTEContext { + var p = new(ExpArithmeticLTEContext) + + p.ExpContext = NewEmptyExpContext() + 
p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpArithmeticLTEContext) GetLeft() IExpContext { return s.left } + +func (s *ExpArithmeticLTEContext) GetRight() IExpContext { return s.right } + +func (s *ExpArithmeticLTEContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpArithmeticLTEContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpArithmeticLTEContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpArithmeticLTEContext) LTE() antlr.TerminalNode { + return s.GetToken(BoolexpParserLTE, 0) +} + +func (s *ExpArithmeticLTEContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpArithmeticLTEContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpArithmeticLTEContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpArithmeticLTE(s) + } +} + +func (s *ExpArithmeticLTEContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpArithmeticLTE(s) + } +} + +func (s *ExpArithmeticLTEContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpArithmeticLTE(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpArithmeticGTContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpArithmeticGTContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpArithmeticGTContext { + var p = new(ExpArithmeticGTContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpArithmeticGTContext) GetLeft() IExpContext { return s.left } + +func (s *ExpArithmeticGTContext) GetRight() IExpContext { return s.right } + +func (s *ExpArithmeticGTContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpArithmeticGTContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpArithmeticGTContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpArithmeticGTContext) GT() antlr.TerminalNode { + return s.GetToken(BoolexpParserGT, 0) +} + +func (s *ExpArithmeticGTContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpArithmeticGTContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpArithmeticGTContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpArithmeticGT(s) + } +} + +func (s *ExpArithmeticGTContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpArithmeticGT(s) + } +} + +func (s *ExpArithmeticGTContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpArithmeticGT(s) + + default: + return t.VisitChildren(s) + } 
+} + +type ExpTextContext struct { + *ExpContext +} + +func NewExpTextContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpTextContext { + var p = new(ExpTextContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpTextContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpTextContext) TEXT() antlr.TerminalNode { + return s.GetToken(BoolexpParserTEXT, 0) +} + +func (s *ExpTextContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpText(s) + } +} + +func (s *ExpTextContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpText(s) + } +} + +func (s *ExpTextContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpText(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpNumberContext struct { + *ExpContext +} + +func NewExpNumberContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpNumberContext { + var p = new(ExpNumberContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpNumberContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpNumberContext) NUMBER() antlr.TerminalNode { + return s.GetToken(BoolexpParserNUMBER, 0) +} + +func (s *ExpNumberContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpNumber(s) + } +} + +func (s *ExpNumberContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpNumber(s) + } +} + +func (s *ExpNumberContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpNumber(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpLogicalAndContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpLogicalAndContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpLogicalAndContext { + var p = new(ExpLogicalAndContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpLogicalAndContext) GetLeft() IExpContext { return s.left } + +func (s *ExpLogicalAndContext) GetRight() IExpContext { return s.right } + +func (s *ExpLogicalAndContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpLogicalAndContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpLogicalAndContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpLogicalAndContext) AND() antlr.TerminalNode { + return s.GetToken(BoolexpParserAND, 0) +} + +func (s *ExpLogicalAndContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpLogicalAndContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpLogicalAndContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpLogicalAnd(s) + } +} + +func (s *ExpLogicalAndContext) 
ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpLogicalAnd(s) + } +} + +func (s *ExpLogicalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpLogicalAnd(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpLogicalORContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpLogicalORContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpLogicalORContext { + var p = new(ExpLogicalORContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpLogicalORContext) GetLeft() IExpContext { return s.left } + +func (s *ExpLogicalORContext) GetRight() IExpContext { return s.right } + +func (s *ExpLogicalORContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpLogicalORContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpLogicalORContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpLogicalORContext) OR() antlr.TerminalNode { + return s.GetToken(BoolexpParserOR, 0) +} + +func (s *ExpLogicalORContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpLogicalORContext) Exp(i int) IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpLogicalORContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpLogicalOR(s) + } +} + +func (s *ExpLogicalORContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpLogicalOR(s) + } +} + +func (s *ExpLogicalORContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpLogicalOR(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpFloatContext struct { + *ExpContext +} + +func NewExpFloatContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpFloatContext { + var p = new(ExpFloatContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpFloatContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpFloatContext) FLOAT() antlr.TerminalNode { + return s.GetToken(BoolexpParserFLOAT, 0) +} + +func (s *ExpFloatContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpFloat(s) + } +} + +func (s *ExpFloatContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpFloat(s) + } +} + +func (s *ExpFloatContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpFloat(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpVariableContext struct { + *ExpContext +} + +func NewExpVariableContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpVariableContext { + var p = new(ExpVariableContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s 
*ExpVariableContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpVariableContext) VARIABLE() antlr.TerminalNode { + return s.GetToken(BoolexpParserVARIABLE, 0) +} + +func (s *ExpVariableContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpVariable(s) + } +} + +func (s *ExpVariableContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpVariable(s) + } +} + +func (s *ExpVariableContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpVariable(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpNotContext struct { + *ExpContext +} + +func NewExpNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpNotContext { + var p = new(ExpNotContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpNotContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpNotContext) NOT() antlr.TerminalNode { + return s.GetToken(BoolexpParserNOT, 0) +} + +func (s *ExpNotContext) Exp() IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpNotContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpNot(s) + } +} + +func (s *ExpNotContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpNot(s) + } +} + +func (s *ExpNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpNot(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpInParenContext struct { + *ExpContext +} + +func NewExpInParenContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpInParenContext { + var p = new(ExpInParenContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpInParenContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpInParenContext) LPAR() antlr.TerminalNode { + return s.GetToken(BoolexpParserLPAR, 0) +} + +func (s *ExpInParenContext) Exp() IExpContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpInParenContext) RPAR() antlr.TerminalNode { + return s.GetToken(BoolexpParserRPAR, 0) +} + +func (s *ExpInParenContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpInParen(s) + } +} + +func (s *ExpInParenContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpInParen(s) + } +} + +func (s *ExpInParenContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpInParen(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpBooleanContext struct { + *ExpContext +} + +func NewExpBooleanContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpBooleanContext { + var p = new(ExpBooleanContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s 
*ExpBooleanContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpBooleanContext) Boolean() IBooleanContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IBooleanContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IBooleanContext) +} + +func (s *ExpBooleanContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpBoolean(s) + } +} + +func (s *ExpBooleanContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpBoolean(s) + } +} + +func (s *ExpBooleanContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpBoolean(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpFunctionContext struct { + *ExpContext +} + +func NewExpFunctionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpFunctionContext { + var p = new(ExpFunctionContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpFunctionContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpFunctionContext) METHODNAME() antlr.TerminalNode { + return s.GetToken(BoolexpParserMETHODNAME, 0) +} + +func (s *ExpFunctionContext) LPAR() antlr.TerminalNode { + return s.GetToken(BoolexpParserLPAR, 0) +} + +func (s *ExpFunctionContext) RPAR() antlr.TerminalNode { + return s.GetToken(BoolexpParserRPAR, 0) +} + +func (s *ExpFunctionContext) Arguments() IArgumentsContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IArgumentsContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IArgumentsContext) +} + +func (s *ExpFunctionContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpFunction(s) + } +} + +func (s *ExpFunctionContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpFunction(s) + } +} + +func (s *ExpFunctionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpFunction(s) + + default: + return t.VisitChildren(s) + } +} + +type ExpArithmeticLTContext struct { + *ExpContext + left IExpContext + right IExpContext +} + +func NewExpArithmeticLTContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExpArithmeticLTContext { + var p = new(ExpArithmeticLTContext) + + p.ExpContext = NewEmptyExpContext() + p.parser = parser + p.CopyFrom(ctx.(*ExpContext)) + + return p +} + +func (s *ExpArithmeticLTContext) GetLeft() IExpContext { return s.left } + +func (s *ExpArithmeticLTContext) GetRight() IExpContext { return s.right } + +func (s *ExpArithmeticLTContext) SetLeft(v IExpContext) { s.left = v } + +func (s *ExpArithmeticLTContext) SetRight(v IExpContext) { s.right = v } + +func (s *ExpArithmeticLTContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExpArithmeticLTContext) LT() antlr.TerminalNode { + return s.GetToken(BoolexpParserLT, 0) +} + +func (s *ExpArithmeticLTContext) AllExp() []IExpContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpContext)(nil)).Elem()) + var tst = make([]IExpContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IExpContext) + } + } + + return tst +} + +func (s *ExpArithmeticLTContext) Exp(i int) IExpContext { + var t = 
s.GetTypedRuleContext(reflect.TypeOf((*IExpContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IExpContext) +} + +func (s *ExpArithmeticLTContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.EnterExpArithmeticLT(s) + } +} + +func (s *ExpArithmeticLTContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(BoolexpListener); ok { + listenerT.ExitExpArithmeticLT(s) + } +} + +func (s *ExpArithmeticLTContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case BoolexpVisitor: + return t.VisitExpArithmeticLT(s) + + default: + return t.VisitChildren(s) + } +} + +func (p *BoolexpParser) Exp() (localctx IExpContext) { + return p.exp(0) +} + +func (p *BoolexpParser) exp(_p int) (localctx IExpContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() + localctx = NewExpContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx IExpContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. + _startState := 2 + p.EnterRecursionRule(localctx, 2, BoolexpParserRULE_exp, _p) + var _la int + + defer func() { + p.UnrollRecursionContexts(_parentctx) + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + p.SetState(29) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case BoolexpParserLPAR: + localctx = NewExpInParenContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + + { + p.SetState(12) + p.Match(BoolexpParserLPAR) + } + { + p.SetState(13) + p.exp(0) + } + { + p.SetState(14) + p.Match(BoolexpParserRPAR) + } + + case BoolexpParserNOT: + localctx = NewExpNotContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + { + p.SetState(16) + p.Match(BoolexpParserNOT) + } + { + p.SetState(17) + p.exp(15) + } + + case BoolexpParserTRUE, BoolexpParserFALSE: + localctx = NewExpBooleanContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + { + p.SetState(18) + p.Boolean() + } + + case BoolexpParserVARIABLE: + localctx = NewExpVariableContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + { + p.SetState(19) + p.Match(BoolexpParserVARIABLE) + } + + case BoolexpParserMETHODNAME: + localctx = NewExpFunctionContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + { + p.SetState(20) + p.Match(BoolexpParserMETHODNAME) + } + { + p.SetState(21) + p.Match(BoolexpParserLPAR) + } + p.SetState(23) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if ((_la)&-(0x1f+1)) == 0 && ((1< 0 { + spec.Args = programSpec.Args + } + + if programSpec.Configurable != "" { + spec.Configurable = programSpec.Configurable + } + + return spec +} diff --git a/x-pack/agent/pkg/core/plugin/app/execution_context.go b/x-pack/agent/pkg/core/plugin/app/execution_context.go new file mode 100644 index 00000000000..e627f9cfdd1 --- /dev/null +++ b/x-pack/agent/pkg/core/plugin/app/execution_context.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package app
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// ExecutionContext describes a runnable binary.
+type ExecutionContext struct {
+	BinaryName string
+	Version    string
+	Tags       map[Tag]string
+	ID         string
+}
+
+// NewExecutionContext creates an execution context and generates an ID for this context.
+func NewExecutionContext(binaryName, version string, tags map[Tag]string) ExecutionContext {
+	id := fmt.Sprintf("%s--%s", binaryName, version)
+	if len(tags) > 0 {
+		// hash the tag set so differently tagged instances get distinct IDs
+		hash := sha256.Sum256([]byte(fmt.Sprint(tags)))
+		id += fmt.Sprintf("--%x", hash)
+	}
+
+	return ExecutionContext{
+		BinaryName: binaryName,
+		Version:    version,
+		Tags:       tags,
+		ID:         id,
+	}
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go b/x-pack/agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go
new file mode 100644
index 00000000000..146ab6c447f
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go
@@ -0,0 +1,199 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package beats
+
+import (
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+)
+
+const httpPlusPrefix = "http+"
+
+// Monitor is a monitoring interface providing information about the way
+// a Beat is monitored.
+type Monitor struct {
+	pipelineID string
+
+	process            string
+	monitoringEndpoint string
+	loggingPath        string
+
+	monitorLogs    bool
+	monitorMetrics bool
+}
+
+// NewMonitor creates a beats monitor.
+func NewMonitor(process, pipelineID string, downloadConfig *artifact.Config, monitorLogs, monitorMetrics bool) *Monitor {
+	var monitoringEndpoint, loggingPath string
+
+	if monitorMetrics {
+		monitoringEndpoint = getMonitoringEndpoint(process, downloadConfig.OS(), pipelineID)
+	}
+	if monitorLogs {
+		loggingPath = getLoggingFileDirectory(downloadConfig.InstallPath, downloadConfig.OS(), pipelineID)
+	}
+
+	return &Monitor{
+		pipelineID:         pipelineID,
+		process:            process,
+		monitoringEndpoint: monitoringEndpoint,
+		loggingPath:        loggingPath,
+		monitorLogs:        monitorLogs,
+		monitorMetrics:     monitorMetrics,
+	}
+}
+
+// EnrichArgs enriches the arguments provided to the application in order
+// to enable monitoring.
+func (b *Monitor) EnrichArgs(args []string) []string {
+	appendix := make([]string, 0, 7)
+
+	if b.monitoringEndpoint != "" {
+		appendix = append(appendix,
+			"-E", "http.enabled=true",
+			"-E", "http.host="+b.monitoringEndpoint,
+		)
+	}
+
+	if b.loggingPath != "" {
+		appendix = append(appendix,
+			"-E", "logging.files.path="+b.loggingPath,
+			"-E", "logging.files.name="+b.process,
+			"-E", "logging.files.keepfiles=7",
+			"-E", "logging.files.permission=0644",
+			"-E", "logging.files.interval=1h",
+		)
+	}
+
+	return append(args, appendix...)
+}
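+
+// An illustrative usage sketch (assumes downloadCfg is a populated
+// *artifact.Config; the wiring is not prescribed by this patch):
+//
+//	monitor := NewMonitor("filebeat", "default", downloadCfg, true, true)
+//	args := monitor.EnrichArgs([]string{"-e"})
+//	// args now ends with the "-E http.*" and "-E logging.files.*"
+//	// settings that expose metrics and redirect logs for this pipeline.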
+
+// Cleanup removes the monitoring drop directory. Logs are left in place on
+// purpose, as they might not have been fully processed yet.
+func (b *Monitor) Cleanup() error {
+	// do not clean up logs, they might not be all processed
+	drop := b.monitoringDrop()
+	if drop == "" {
+		return nil
+	}
+
+	return os.RemoveAll(drop)
+}
+
+// Prepare executes steps in order for monitoring to work correctly.
+func (b *Monitor) Prepare(uid, gid int) error {
+	drops := []string{b.loggingPath}
+	if drop := b.monitoringDrop(); drop != "" {
+		drops = append(drops, drop)
+	}
+
+	for _, drop := range drops {
+		if drop == "" {
+			continue
+		}
+
+		_, err := os.Stat(drop)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			// create
+			if err := os.MkdirAll(drop, 0775); err != nil {
+				return err
+			}
+		}
+
+		if err := os.Chown(drop, uid, gid); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// LogPath describes a path where the application stores logs. Empty if the
+// application is not monitorable.
+func (b *Monitor) LogPath() string {
+	if !b.monitorLogs {
+		return ""
+	}
+
+	return b.loggingPath
+}
+
+// MetricsPath describes a location where the application exposes metrics
+// collectable by metricbeat.
+func (b *Monitor) MetricsPath() string {
+	if !b.monitorMetrics {
+		return ""
+	}
+
+	return b.monitoringEndpoint
+}
+
+// MetricsPathPrefixed returns the metrics path prefixed with "http+".
+func (b *Monitor) MetricsPathPrefixed() string {
+	return httpPlusPrefix + b.MetricsPath()
+}
+
+func (b *Monitor) monitoringDrop() string {
+	return monitoringDrop(b.monitoringEndpoint)
+}
+
+func monitoringDrop(path string) (drop string) {
+	defer func() {
+		if drop != "" {
+			drop = filepath.Dir(drop)
+		}
+	}()
+
+	if strings.Contains(path, "localhost") {
+		return ""
+	}
+
+	if strings.HasPrefix(path, httpPlusPrefix) {
+		path = strings.TrimPrefix(path, httpPlusPrefix)
+	}
+
+	// npipe is virtual without a drop
+	if isNpipe(path) {
+		return ""
+	}
+
+	if isWindowsPath(path) {
+		return path
+	}
+
+	u, _ := url.Parse(path)
+	if u == nil || (u.Scheme != "" && u.Scheme != "file" && u.Scheme != "unix") {
+		return ""
+	}
+
+	if u.Scheme == "file" {
+		return strings.TrimPrefix(path, "file://")
+	}
+
+	if u.Scheme == "unix" {
+		return strings.TrimPrefix(path, "unix://")
+	}
+
+	return path
+}
+
+func isNpipe(path string) bool {
+	return strings.HasPrefix(path, "npipe") || strings.HasPrefix(path, `\\.\pipe\`)
+}
+
+func isWindowsPath(path string) bool {
+	if len(path) < 4 {
+		return false
+	}
+	return unicode.IsLetter(rune(path[0])) && path[1] == ':'
+}
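+
+// An illustrative sketch of monitoringDrop's resolution rules (derived from
+// the test cases in drop_test.go below):
+//
+//	monitoringDrop("unix:///var/lib/drop/abc.sock") // -> "/var/lib/drop"
+//	monitoringDrop("npipe://drop")                  // -> "" (virtual pipe, no drop)
+//	monitoringDrop("http://localhost:8080/stats")   // -> "" (not socket-backed)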
diff --git a/x-pack/agent/pkg/core/plugin/app/monitoring/beats/drop_test.go b/x-pack/agent/pkg/core/plugin/app/monitoring/beats/drop_test.go
new file mode 100644
index 00000000000..6478dda5225
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/monitoring/beats/drop_test.go
@@ -0,0 +1,50 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package beats
+
+import (
+	"runtime"
+	"testing"
+)
+
+type testCase struct {
+	Endpoint    string
+	Drop        string
+	SkipWindows bool
+}
+
+func TestMonitoringDrops(t *testing.T) {
+	cases := []testCase{
+		{`/var/lib/drop/abc.sock`, "/var/lib/drop", true},
+		{`npipe://drop`, "", false},
+		{`http+npipe://drop`, "", false},
+		{`\\.\pipe\drop`, "", false},
+		{`unix:///var/lib/drop/abc.sock`, "/var/lib/drop", true},
+		{`http+unix:///var/lib/drop/abc.sock`, "/var/lib/drop", true},
+		{`file:///var/lib/drop/abc.sock`, "/var/lib/drop", true},
+		{`http://localhost/stats`, "", false},
+		{`localhost/stats`, "", false},
+		{`http://localhost:8080/stats`, "", false},
+		{`localhost:8080/stats`, "", false},
+		{`http://1.2.3.4/stats`, "", false},
+		{`http://1.2.3.4:5678/stats`, "", false},
+		{`1.2.3.4:5678/stats`, "", false},
+		{`http://hithere.com:5678/stats`, "", false},
+		{`hithere.com:5678/stats`, "", false},
+	}
+
+	for _, c := range cases {
+		t.Run(c.Endpoint, func(t *testing.T) {
+			if runtime.GOOS == "windows" && c.SkipWindows {
+				t.Skip("Skipped under windows")
+			}
+
+			drop := monitoringDrop(c.Endpoint)
+			if drop != c.Drop {
+				t.Errorf("Case[%s]: Expected '%s', got '%s'", c.Endpoint, c.Drop, drop)
+			}
+		})
+	}
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/monitoring/beats/monitoring.go b/x-pack/agent/pkg/core/plugin/app/monitoring/beats/monitoring.go
new file mode 100644
index 00000000000..a69581b3ff9
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/monitoring/beats/monitoring.go
@@ -0,0 +1,42 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package beats
+
+import (
+	"fmt"
+	"path/filepath"
+)
+
+const (
+	// args: pipeline name, application name
+	logFileFormat = "/var/log/elastic-agent/%s/%s"
+	// args: install path, pipeline name, application name
+	logFileFormatWin = "%s\\logs\\elastic-agent\\%s\\%s"
+
+	// args: pipeline name, application name, application name
+	mbEndpointFileFormat = "unix:///var/run/elastic-agent/%s/%s/%s.sock"
+	// args: pipeline name, application name
+	mbEndpointFileFormatWin = `npipe:///%s-%s`
+)
+
+func getMonitoringEndpoint(program, operatingSystem, pipelineID string) string {
+	if operatingSystem == "windows" {
+		return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID, program)
+	}
+
+	return fmt.Sprintf(mbEndpointFileFormat, pipelineID, program, program)
+}
+
+func getLoggingFile(program, operatingSystem, installPath, pipelineID string) string {
+	if operatingSystem == "windows" {
+		return fmt.Sprintf(logFileFormatWin, installPath, pipelineID, program)
+	}
+
+	return fmt.Sprintf(logFileFormat, pipelineID, program)
+}
+
+// getLoggingFileDirectory returns the directory that holds the log files,
+// so that a directory is handed to logging.files.path.
+func getLoggingFileDirectory(installPath, operatingSystem, pipelineID string) string {
+	return filepath.Dir(getLoggingFile("program", operatingSystem, installPath, pipelineID))
+}
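+
+// Illustrative results of the formats above:
+//
+//	getMonitoringEndpoint("filebeat", "linux", "default")
+//	// -> "unix:///var/run/elastic-agent/default/filebeat/filebeat.sock"
+//	getMonitoringEndpoint("filebeat", "windows", "default")
+//	// -> "npipe:///default-filebeat"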
diff --git a/x-pack/agent/pkg/core/plugin/app/monitoring/config.go b/x-pack/agent/pkg/core/plugin/app/monitoring/config.go
new file mode 100644
index 00000000000..c5e2eba6a57
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/monitoring/config.go
@@ -0,0 +1,11 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package monitoring
+
+// Config describes a monitoring configuration.
+type Config struct {
+	MonitorLogs    bool `yaml:"logs" config:"logs"`
+	MonitorMetrics bool `yaml:"metrics" config:"metrics"`
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/monitoring/monitor.go b/x-pack/agent/pkg/core/plugin/app/monitoring/monitor.go
new file mode 100644
index 00000000000..ed820cd0ebe
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/monitoring/monitor.go
@@ -0,0 +1,32 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package monitoring
+
+import (
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/artifact"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring/beats"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/app/monitoring/noop"
+)
+
+// Monitor is a monitoring interface providing information about the way
+// an application is monitored.
+type Monitor interface {
+	EnrichArgs([]string) []string
+	Prepare(uid, gid int) error
+	Cleanup() error
+	LogPath() string
+	MetricsPath() string
+	MetricsPathPrefixed() string
+}
+
+// NewMonitor creates a monitor based on a process configuration.
+func NewMonitor(isMonitorable bool, process, pipelineID string, downloadConfig *artifact.Config, monitorLogs, monitorMetrics bool) Monitor {
+	if !isMonitorable {
+		return noop.NewMonitor()
+	}
+
+	// so far we support only beats monitoring
+	return beats.NewMonitor(process, pipelineID, downloadConfig, monitorLogs, monitorMetrics)
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go b/x-pack/agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go
new file mode 100644
index 00000000000..93e8c2c46dc
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go
@@ -0,0 +1,48 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package noop
+
+// Monitor is a no-op implementation of the monitoring interface; it neither
+// collects logs nor exposes metrics.
+type Monitor struct {
+}
+
+// NewMonitor creates a no-op monitor.
+func NewMonitor() *Monitor {
+	return &Monitor{}
+}
+
+// EnrichArgs returns the arguments unchanged; no monitoring is enabled.
+func (b *Monitor) EnrichArgs(args []string) []string {
+	return args
+}
+
+// Cleanup cleans up all drops; a no-op here.
+func (b *Monitor) Cleanup() error {
+	return nil
+}
+
+// Prepare executes steps in order for monitoring to work correctly; a no-op here.
+func (b *Monitor) Prepare(uid, gid int) error {
+	return nil
+}
+
+// LogPath describes a path where the application stores logs. Always empty,
+// as the application is not monitorable.
+func (b *Monitor) LogPath() string {
+	return ""
+}
+
+// MetricsPath describes a location where the application exposes metrics
+// collectable by metricbeat. Always empty here.
+func (b *Monitor) MetricsPath() string {
+	return ""
+}
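+
+// A compile-time sketch (illustrative, not part of the patch): both the
+// beats and the noop monitor are meant to satisfy monitoring.Monitor,
+// which a consuming package could assert with:
+//
+//	var _ monitoring.Monitor = (*beats.Monitor)(nil)
+//	var _ monitoring.Monitor = (*noop.Monitor)(nil)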
+
+// MetricsPathPrefixed returns the metrics path prefixed with "http+".
+// Always empty here.
+func (b *Monitor) MetricsPathPrefixed() string {
+	return ""
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/process_cred.go b/x-pack/agent/pkg/core/plugin/app/process_cred.go
new file mode 100644
index 00000000000..2b288b634d8
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/process_cred.go
@@ -0,0 +1,63 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build linux darwin
+
+package app
+
+import (
+	"os"
+	"os/user"
+	"strconv"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+func getUserGroup(spec ProcessSpec) (int, int, error) {
+	if spec.User.Uid == "" && spec.Group.Gid == "" {
+		// nothing specified, run under the agent's own effective ids
+		return os.Geteuid(), os.Getegid(), nil
+	}
+
+	// check if user/group exists
+	usedUID := spec.User.Uid
+	userGID := ""
+	if u, err := user.LookupId(spec.User.Uid); err != nil {
+		u, err := user.Lookup(spec.User.Name)
+		if err != nil {
+			return 0, 0, err
+		}
+		usedUID = u.Uid
+		userGID = u.Gid
+	} else {
+		userGID = u.Gid
+	}
+
+	usedGID := spec.Group.Gid
+	if spec.Group.Gid != "" || spec.Group.Name != "" {
+		if _, err := user.LookupGroupId(spec.Group.Gid); err != nil {
+			g, err := user.LookupGroup(spec.Group.Name)
+			if err != nil {
+				return 0, 0, err
+			}
+
+			usedGID = g.Gid
+		}
+	} else {
+		// if the group is not specified and the user is found, use the user's group
+		usedGID = userGID
+	}
+
+	uid, err := strconv.Atoi(usedUID)
+	if err != nil {
+		return 0, 0, errors.New(err, "invalid user")
+	}
+
+	gid, err := strconv.Atoi(usedGID)
+	if err != nil {
+		return 0, 0, errors.New(err, "invalid group")
+	}
+
+	return uid, gid, nil
+}
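+
+// Behaviour sketch (illustrative): with an empty spec the agent reuses its
+// own effective ids,
+//
+//	uid, gid, err := getUserGroup(ProcessSpec{}) // os.Geteuid(), os.Getegid(), nil
+//
+// while a named user is resolved by id first and by name second before the
+// ids are converted with strconv.Atoi.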
diff --git a/x-pack/agent/pkg/core/plugin/app/process_cred_other.go b/x-pack/agent/pkg/core/plugin/app/process_cred_other.go
new file mode 100644
index 00000000000..139b30de729
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/process_cred_other.go
@@ -0,0 +1,12 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build !linux
+// +build !darwin
+
+package app
+
+func getUserGroup(spec ProcessSpec) (int, int, error) {
+	return 0, 0, nil
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/spec.go b/x-pack/agent/pkg/core/plugin/app/spec.go
new file mode 100644
index 00000000000..1dbd56ae612
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/spec.go
@@ -0,0 +1,43 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package app
+
+import (
+	"os/user"
+)
+
+const (
+	// ConfigurableGrpc is a flag telling the agent that the program can expose
+	// a gRPC server with a Config endpoint.
+	ConfigurableGrpc = "grpc"
+	// ConfigurableFile is a flag telling the agent that the program can be
+	// configured by accepting a `-c filepath` argument pointing at a
+	// configuration file.
+	ConfigurableFile = "file"
+)
+
+// Specifier returns a process specification.
+type Specifier interface {
+	Spec() ProcessSpec
+}
+
+// ProcessSpec specifies a way of running a process.
+type ProcessSpec struct {
+	// Binary path.
+	BinaryPath string
+
+	// Set of arguments.
+	Args []string
+
+	// Allows running a third-party application without requiring a Config
+	// endpoint; recognized options are: [grpc, file]
+	Configurable  string
+	Configuration map[string]interface{}
+
+	// The user and group to run the program under (for example, apm-server
+	// does not run as root; isolation and cgroups).
+	User  user.User
+	Group user.Group
+
+	// TODO: mapping transformation rules for configuration between agent.yml and to the beats.
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/start.go b/x-pack/agent/pkg/core/plugin/app/start.go
new file mode 100644
index 00000000000..818024e50dd
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/start.go
@@ -0,0 +1,358 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package app
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+	"unicode"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/authority"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/state"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig/grpc"
+)
+
+const (
+	configurationFlag     = "-c"
+	configFileTempl       = "%s.yml" // providing beat id
+	configFilePermissions = 0644     // writable only by the owner
+)
+
+// stateClient is the client connecting the agent and a process.
+type stateClient interface {
+	Status(ctx context.Context) (string, error)
+	Close() error
+}
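+
+// A brief orientation note (summarizing the flow below): Start prepares
+// monitoring, writes the config file for "file"-configurable programs,
+// generates a per-process CA and client/server TLS pairs for
+// "grpc"-configurable ones, spawns the process, and then polls it until
+// its status endpoint answers.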
+
+// Start starts the application with a specified config.
+func (a *Application) Start(ctx context.Context, cfg map[string]interface{}) (err error) {
+	defer func() {
+		if err != nil {
+			// inject App metadata
+			err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id))
+		}
+	}()
+	a.appLock.Lock()
+	defer a.appLock.Unlock()
+
+	if a.state.Status == state.Running {
+		return nil
+	}
+
+	defer func() {
+		if err != nil {
+			// reportError()
+			a.state.Status = state.Stopped
+		}
+	}()
+
+	if err := a.monitor.Prepare(a.uid, a.gid); err != nil {
+		return err
+	}
+
+	spec := a.spec.Spec()
+	if err := a.configureByFile(&spec, cfg); err != nil {
+		return errors.New(err, errors.TypeApplication)
+	}
+
+	// TODO: provider -> client
+	ca, err := generateCA(spec.Configurable)
+	if err != nil {
+		return errors.New(err, errors.TypeSecurity)
+	}
+	processCreds, err := generateConfigurable(spec.Configurable, ca)
+	if err != nil {
+		return errors.New(err, errors.TypeSecurity)
+	}
+
+	if a.limiter != nil {
+		a.limiter.Add()
+	}
+
+	spec.Args = a.monitor.EnrichArgs(spec.Args)
+
+	// specify the beat name to avoid data lock conflicts;
+	// per https://github.com/elastic/beats/pull/14030, more than one instance
+	// of a beat with the same data path fails to start
+	spec.Args = injectDataPath(spec.Args, a.pipelineID, a.id)
+
+	a.state.ProcessInfo, err = process.Start(
+		a.logger,
+		spec.BinaryPath,
+		a.processConfig,
+		a.uid,
+		a.gid,
+		processCreds,
+		spec.Args...)
+	if err != nil {
+		return err
+	}
+
+	a.waitForGrpc(spec, ca)
+
+	a.grpcClient, err = generateClient(spec.Configurable, a.state.ProcessInfo.Address, a.clientFactory, ca)
+	if err != nil {
+		return errors.New(err, errors.TypeSecurity)
+	}
+	a.state.Status = state.Running
+
+	// setup watcher
+	a.watch(ctx, a.state.ProcessInfo.Process, cfg)
+
+	return nil
+}
+
+func (a *Application) waitForGrpc(spec ProcessSpec, ca *authority.CertificateAuthority) error {
+	const (
+		rounds        int           = 3
+		roundsTimeout time.Duration = 30 * time.Second
+		retries       int           = 5
+		retryTimeout  time.Duration = 2 * time.Second
+	)
+
+	// no need to wait, the program is configured by file
+	if spec.Configurable != ConfigurableGrpc {
+		return nil
+	}
+
+	checkFn := func(ctx context.Context, address string) error {
+		return a.checkGrpcHTTP(ctx, address, ca)
+	}
+	if isPipe(a.state.ProcessInfo.Address) {
+		checkFn = a.checkGrpcPipe
+	}
+
+	for round := 1; round <= rounds; round++ {
+		for retry := 1; retry <= retries; retry++ {
+			c, cancelFn := context.WithTimeout(context.Background(), retryTimeout)
+			err := checkFn(c, a.state.ProcessInfo.Address)
+			if err == nil {
+				cancelFn()
+				return nil
+			}
+			cancelFn()
+
+			// do not wait on the last retry
+			if retry != retries {
+				<-time.After(retryTimeout)
+			}
+		}
+		// do not wait on the last round
+		if round != rounds {
+			<-time.After(time.Duration(round) * roundsTimeout)
+		}
+	}
+
+	// do not err out; config calls will fail on their own after some more retries
+	return nil
+}
+
+func isPipe(address string) bool {
+	address = strings.TrimPrefix(address, "http+")
+	return strings.HasPrefix(address, "file:") ||
+		strings.HasPrefix(address, "unix:") ||
+		strings.HasPrefix(address, "npipe") ||
+		strings.HasPrefix(address, `\\.\pipe\`) ||
+		isWindowsPath(address)
+}
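+
+// Illustrative, based on the prefixes handled above:
+//
+//	isPipe("http+unix:///var/run/agent.sock") // true
+//	isPipe(`\\.\pipe\agent`)                  // true
+//	isPipe("localhost:9000")                  // false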
+
+func (a *Application) checkGrpcPipe(ctx context.Context, address string) error {
+	// TODO: not supported yet
+	return nil
+}
+
+func (a *Application) checkGrpcHTTP(ctx context.Context, address string, ca *authority.CertificateAuthority) error {
+	grpcClient, err := generateClient(ConfigurableGrpc, a.state.ProcessInfo.Address, a.clientFactory, ca)
+	if err != nil {
+		return errors.New(err, errors.TypeSecurity)
+	}
+
+	stateClient, ok := grpcClient.(stateClient)
+	if !ok {
+		// does not support getting state,
+		// let successive calls fail/succeed
+		return nil
+	}
+
+	result, err := stateClient.Status(ctx)
+	defer stateClient.Close()
+	if err != nil {
+		return errors.New(err, "getting state failed", errors.TypeNetwork)
+	}
+
+	if strings.ToLower(result) != "ok" {
+		return errors.New(
+			fmt.Sprintf("getting state failed. not ok state received: '%s'", result),
+			errors.TypeNetwork)
+	}
+
+	return nil
+}
+
+func injectDataPath(args []string, pipelineID, id string) []string {
+	wd := ""
+	if w, err := os.Getwd(); err == nil {
+		wd = w
+	}
+
+	dataPath := filepath.Join(wd, "data", pipelineID, id)
+	return append(args, "-E", "path.data="+dataPath)
+}
+
+func generateCA(configurable string) (*authority.CertificateAuthority, error) {
+	if !isGrpcConfigurable(configurable) {
+		return nil, nil
+	}
+
+	ca, err := authority.NewCA()
+	if err != nil {
+		return nil, errors.New(err, "app.Start", errors.TypeSecurity)
+	}
+
+	return ca, nil
+}
+
+func generateConfigurable(configurable string, ca *authority.CertificateAuthority) (*process.Creds, error) {
+	var processCreds *process.Creds
+	var err error
+
+	if isGrpcConfigurable(configurable) {
+		processCreds, err = getProcessCredentials(configurable, ca)
+		if err != nil {
+			return nil, errors.New(err, errors.TypeSecurity)
+		}
+	}
+
+	return processCreds, nil
+}
+
+func generateClient(configurable, address string, factory remoteconfig.ConnectionCreator, ca *authority.CertificateAuthority) (remoteconfig.Client, error) {
+	var grpcClient remoteconfig.Client
+
+	if isGrpcConfigurable(configurable) {
+		connectionProvider, err := getConnectionProvider(configurable, ca, address)
+		if err != nil {
+			return nil, errors.New(err, errors.TypeNetwork)
+		}
+
+		grpcClient, err = factory.NewConnection(connectionProvider)
+		if err != nil {
+			return nil, errors.New(err, "creating connection", errors.TypeNetwork)
+		}
+	}
+
+	return grpcClient, nil
+}
+
+func getConnectionProvider(configurable string, ca *authority.CertificateAuthority, address string) (*grpc.ConnectionProvider, error) {
+	if !isGrpcConfigurable(configurable) {
+		return nil, nil
+	}
+
+	clientPair, err := ca.GeneratePair()
+	if err != nil {
+		return nil, errors.New(err, errors.TypeNetwork)
+	}
+
+	return grpc.NewConnectionProvider(address, ca.Crt(), clientPair.Key, clientPair.Crt), nil
+}
+
+func isGrpcConfigurable(configurable string) bool {
+	return configurable == ConfigurableGrpc
+}
+
+func (a *Application) configureByFile(spec *ProcessSpec, config map[string]interface{}) error {
+	// check if configured by file
+	if spec.Configurable != ConfigurableFile {
+		return nil
+	}
+
+	// save the yaml as filebeat_id.yml
+	filename := fmt.Sprintf(configFileTempl, a.id)
+	filePath, err := filepath.Abs(filepath.Join(a.downloadConfig.InstallPath, filename))
+	if err != nil {
+		return errors.New(err, errors.TypeFilesystem)
+	}
+
+	f, err := os.OpenFile(filePath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, configFilePermissions)
+	if err != nil {
+		return errors.New(err, errors.TypeFilesystem)
+	}
+	defer f.Close()
+
+	// change the owner
+	if err := os.Chown(filePath, a.uid, a.gid); err != nil {
+		return err
+	}
+
+	encoder := yaml.NewEncoder(f)
+	if err := encoder.Encode(config); err != nil {
+		return errors.New(err, errors.TypeFilesystem)
+	}
+	defer encoder.Close()
+
+	// update the args
+	return updateSpecConfig(spec, filePath)
+}
+
+func updateSpecConfig(spec *ProcessSpec, configPath string) error {
+	// check if a config flag is already provided
+	configIndex := -1
+	for i, v := range spec.Args {
+		if v == configurationFlag {
+			configIndex = i
+			break
+		}
+	}
+
+	if configIndex != -1 {
+		// -c provided
+		if len(spec.Args) == configIndex+1 {
+			// -c is the last argument, append the path
+			spec.Args = append(spec.Args, configPath)
+			return nil
+		}
+		spec.Args[configIndex+1] = configPath
+		return nil
+	}
+
+	spec.Args = append(spec.Args, configurationFlag, configPath)
+	return nil
+}
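+
+// Illustrative behaviour of updateSpecConfig (hypothetical paths):
+//
+//	spec := ProcessSpec{Args: []string{"run", "-c"}}
+//	_ = updateSpecConfig(&spec, "/opt/agent/filebeat_x.yml")
+//	// spec.Args == ["run", "-c", "/opt/agent/filebeat_x.yml"]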
+
+func getProcessCredentials(configurable string, ca *authority.CertificateAuthority) (*process.Creds, error) {
+	var processCreds *process.Creds
+
+	if isGrpcConfigurable(configurable) {
+		// the process private key and certificate serve as the server credentials
+		processPair, err := ca.GeneratePair()
+		if err != nil {
+			return nil, errors.New(err, "failed to generate credentials")
+		}
+
+		processCreds = &process.Creds{
+			CaCert: ca.Crt(),
+			PK:     processPair.Key,
+			Cert:   processPair.Crt,
+		}
+	}
+
+	return processCreds, nil
+}
+
+func isWindowsPath(path string) bool {
+	if len(path) < 4 {
+		return false
+	}
+	return unicode.IsLetter(rune(path[0])) && path[1] == ':'
+}
diff --git a/x-pack/agent/pkg/core/plugin/app/tag.go b/x-pack/agent/pkg/core/plugin/app/tag.go
new file mode 100644
index 00000000000..8b719031b81
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/tag.go
@@ -0,0 +1,12 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package app
+
+// Tag is a tag for specifying metadata related
+// to a process.
+type Tag string
+
+// TagSidecar tags a sidecar process.
+const TagSidecar Tag = "sidecar"
diff --git a/x-pack/agent/pkg/core/plugin/app/watch_posix.go b/x-pack/agent/pkg/core/plugin/app/watch_posix.go
new file mode 100644
index 00000000000..f6e9be9d4fa
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/watch_posix.go
@@ -0,0 +1,33 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build !windows
+
+package app
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+// externalProcess is a watch mechanism used in cases where the OS requires
+// a process to be a child of the process that waits for it. We need to be
+// able to await any process.
+// This operation is long running.
+func (a *Application) externalProcess(proc *os.Process) {
+	if proc == nil {
+		return
+	}
+
+	for {
+		select {
+		case <-time.After(1 * time.Second):
+			if proc.Signal(syscall.Signal(0)) != nil {
+				// failed to contact the process, return
+				return
+			}
+		}
+	}
+}
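+
+// Illustrative: signal 0 is the conventional POSIX liveness probe; it
+// delivers no signal but fails once the process is gone:
+//
+//	err := proc.Signal(syscall.Signal(0)) // nil while the process exists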
diff --git a/x-pack/agent/pkg/core/plugin/app/watch_windows.go b/x-pack/agent/pkg/core/plugin/app/watch_windows.go
new file mode 100644
index 00000000000..767e27dffed
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/app/watch_windows.go
@@ -0,0 +1,55 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build windows
+
+package app
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+const (
+	// exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess
+	exitCodeStillActive = 259
+)
+
+// externalProcess is a watch mechanism used in cases where the OS requires
+// a process to be a child of the process that waits for it. We need to be
+// able to await any process.
+func (a *Application) externalProcess(proc *os.Process) {
+	if proc == nil {
+		return
+	}
+
+	for {
+		select {
+		case <-time.After(1 * time.Second):
+			if isWindowsProcessExited(proc.Pid) {
+				return
+			}
+		}
+	}
+}
+
+func isWindowsProcessExited(pid int) bool {
+	const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE
+	h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid))
+	if err != nil {
+		// failed to open the handle, report exited
+		return true
+	}
+
+	// get the exit code; this returns immediately and reports
+	// exitCodeStillActive in case the process is still running
+	var ec uint32
+	if err := syscall.GetExitCodeProcess(h, &ec); err != nil {
+		// failed to contact, report exited
+		return true
+	}
+
+	return ec != exitCodeStillActive
+}
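+
+// Caveat worth noting: 259 is the documented STILL_ACTIVE sentinel, so a
+// process that legitimately exits with code 259 would still be reported
+// as running by this check.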
diff --git a/x-pack/agent/pkg/core/plugin/authority/ca.go b/x-pack/agent/pkg/core/plugin/authority/ca.go
new file mode 100644
index 00000000000..f57c3ae2a49
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/authority/ca.go
@@ -0,0 +1,146 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package authority
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"log"
+	"math/big"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// CertificateAuthority is an abstraction for a common certificate authority,
+// unique per process.
+type CertificateAuthority struct {
+	caCert     *x509.Certificate
+	privateKey crypto.PrivateKey
+	caPEM      []byte
+}
+
+// Pair is an x509 Key/Cert pair.
+type Pair struct {
+	Crt []byte
+	Key []byte
+}
+
+// NewCA creates a new certificate authority capable of generating child certificates.
+func NewCA() (*CertificateAuthority, error) {
+	ca := &x509.Certificate{
+		SerialNumber: big.NewInt(1653),
+		Subject: pkix.Name{
+			Organization: []string{"elastic-fleet"},
+			CommonName:   "localhost",
+		},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().AddDate(10, 0, 0),
+		IsCA:                  true,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+	}
+
+	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, errors.New(err, "generating ca private key", errors.TypeSecurity)
+	}
+	publicKey := &privateKey.PublicKey
+	caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, publicKey, privateKey)
+	if err != nil {
+		log.Println("create ca failed", err)
+		return nil, errors.New(err, "ca creation failed", errors.TypeSecurity)
+	}
+
+	var pubKeyBytes, privateKeyBytes []byte
+
+	certOut := bytes.NewBuffer(pubKeyBytes)
+	keyOut := bytes.NewBuffer(privateKeyBytes)
+
+	// Public key
+	err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: caBytes})
+	if err != nil {
+		return nil, errors.New(err, "signing ca certificate", errors.TypeSecurity)
+	}
+
+	// Private key
+	err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
+	if err != nil {
+		return nil, errors.New(err, "generating ca private key", errors.TypeSecurity)
+	}
+
+	// prepare tls
+	caPEM := certOut.Bytes()
+	caTLS, err := tls.X509KeyPair(caPEM, keyOut.Bytes())
+	if err != nil {
+		return nil, errors.New(err, "generating ca x509 pair", errors.TypeSecurity)
+	}
+
+	caCert, err := x509.ParseCertificate(caTLS.Certificate[0])
+	if err != nil {
+		return nil, errors.New(err, "parsing ca certificate", errors.TypeSecurity)
+	}
+
+	return &CertificateAuthority{
+		privateKey: caTLS.PrivateKey,
+		caCert:     caCert,
+		caPEM:      caPEM,
+	}, nil
+}
+
+// GeneratePair generates a child certificate/key pair.
+func (c *CertificateAuthority) GeneratePair() (*Pair, error) {
+	// Prepare the certificate
+	certTemplate := &x509.Certificate{
+		SerialNumber: big.NewInt(1658),
+		Subject: pkix.Name{
+			Organization: []string{"elastic-fleet"},
+			CommonName:   "localhost",
+		},
+		NotBefore:   time.Now(),
+		NotAfter:    time.Now().AddDate(10, 0, 0),
+		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+		KeyUsage:    x509.KeyUsageDigitalSignature,
+	}
+	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, errors.New(err, "generating private key", errors.TypeSecurity)
+	}
+	publicKey := &privateKey.PublicKey
+
+	// Sign the certificate
+	certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, c.caCert, publicKey, c.privateKey)
+	if err != nil {
+		return nil, errors.New(err, "signing certificate", errors.TypeSecurity)
+	}
+
+	var pubKeyBytes, privateKeyBytes []byte
+
+	certOut := bytes.NewBuffer(pubKeyBytes)
+	keyOut := bytes.NewBuffer(privateKeyBytes)
+
+	// Public key
+	err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: certBytes})
+	if err != nil {
+		return nil, errors.New(err, "generating public key", errors.TypeSecurity)
+	}
+
+	// Private key
+	err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
+	if err != nil {
+		return nil, errors.New(err, "generating private key", errors.TypeSecurity)
+	}
+
+	return &Pair{
+		Crt: certOut.Bytes(),
+		Key: keyOut.Bytes(),
+	}, nil
+}
+
+// Crt returns the PEM-encoded certificate of the certificate authority.
+func (c *CertificateAuthority) Crt() []byte {
+	return c.caPEM
+}
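+
+// Illustrative wiring (hypothetical variable names): the agent keeps the CA,
+// hands one generated pair to the child process as server credentials, and
+// dials with another pair as the client:
+//
+//	ca, _ := NewCA()
+//	serverPair, _ := ca.GeneratePair()
+//	clientPair, _ := ca.GeneratePair()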
+	cmd.Env = append(cmd.Env, os.Environ()...)
+	cmd.Env = append(cmd.Env, env...)
+	cmd.Dir = filepath.Dir(path)
+	if isInt32(uid) && isInt32(gid) {
+		cmd.SysProcAttr = &syscall.SysProcAttr{
+			Credential: &syscall.Credential{
+				Uid:         uint32(uid),
+				Gid:         uint32(gid),
+				NoSetGroups: true,
+			},
+		}
+	} else {
+		logger.Errorf("provided uid or gid for %s is invalid. uid: '%d' gid: '%d'.", path, uid, gid)
+	}
+
+	return cmd
+}
diff --git a/x-pack/agent/pkg/core/plugin/process/config.go b/x-pack/agent/pkg/core/plugin/process/config.go
new file mode 100644
index 00000000000..0791692aeb4
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/process/config.go
@@ -0,0 +1,21 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package process
+
+import "time"
+
+// Config is used for fine-tuning a new process.
+type Config struct {
+	MinPortNumber int           `yaml:"min_port" config:"min_port"`
+	MaxPortNumber int           `yaml:"max_port" config:"max_port"`
+	SpawnTimeout  time.Duration `yaml:"spawn_timeout" config:"spawn_timeout"`
+
+	// Transport is one of `unix` or `tcp`. `unix` uses unix sockets and is not supported on windows.
+	// Windows falls back to `tcp` regardless of configuration, and so does any
+	// invalid configuration value.
+	Transport string `yaml:"transport" config:"transport"`
+
+	// TODO: cgroups and namespaces
+}
diff --git a/x-pack/agent/pkg/core/plugin/process/process.go b/x-pack/agent/pkg/core/plugin/process/process.go
new file mode 100644
index 00000000000..a612e4a8352
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/process/process.go
@@ -0,0 +1,204 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
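To make the certificate plumbing concrete: a minimal sketch of how authority/ca.go above and the Creds struct from process.go below fit together. The wiring and error handling here are illustrative only, not part of this patch:

	ca, err := authority.NewCA() // one CA per agent process
	if err != nil {
		return err
	}
	pair, err := ca.GeneratePair() // one child certificate per managed process
	if err != nil {
		return err
	}
	creds := &process.Creds{
		CaCert: ca.Crt(), // PEM CA certificate, shared with the child
		PK:     pair.Key, // child private key
		Cert:   pair.Crt, // child certificate
	}
	// creds is then handed to process.Start, which pushes it to the child over stdin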
+ +package process + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math" + mrand "math/rand" + "net" + "os" + "path/filepath" + "runtime" + "time" + + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" +) + +const ( + // DefaultTimeout is timeout for starting a process, needs to be passed as a config + DefaultTimeout = 10 * time.Second + // MinPortNumberKey is a minimum port new process can get for newly created GRPC server + MinPortNumberKey = "MIN_PORT_NUMBER" + // MaxPortNumberKey is a maximum port new process can get for newly created GRPC server + MaxPortNumberKey = "MAX_PORT_NUMBER" + // DefaultMinPort is used when no configuration is provided + DefaultMinPort = 10000 + // DefaultMaxPort is used when no configuration is provided + DefaultMaxPort = 30000 + + transportUnix = "unix" + transportTCP = "tcp" +) + +var ( + // ErrProcessStartFailedTimeout is a failure of start due to timeout + ErrProcessStartFailedTimeout = errors.New("process failed to start due to timeout") +) + +// Info groups information about fresh new process +type Info struct { + Address string + PID int + Process *os.Process +} + +// Creds contains information for securing a communication +type Creds struct { + CaCert []byte + PK []byte + Cert []byte +} + +// Start starts a new process +// Returns: +// - network address of child process +// - process id +// - error +func Start(logger *logger.Logger, path string, config *Config, uid, gid int, creds *Creds, arg ...string) (processInfo *Info, err error) { + // inject env + grpcAddress, err := getGrpcAddress(config) + if err != nil { + return nil, errors.New(err, "failed to acquire grpc address") + } + + logger.Infof("address assigned to the process '%s': '%s'", path, grpcAddress) + + env := []string{ + fmt.Sprintf("SERVER_ADDRESS=%s", grpcAddress), + } + + // create a command + cmd := getCmd(logger, path, env, uid, gid, arg...) 
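+
+	// the child process reads these TLS credentials from its stdin right after
+	// launch; see pushCredentials below for the payload format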
+ stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + + // start process + if err := cmd.Start(); err != nil { + return nil, errors.New(err, fmt.Sprintf("failed to start '%s'", path)) + } + + // push credentials + err = pushCredentials(stdin, creds) + + return &Info{ + PID: cmd.Process.Pid, + Process: cmd.Process, + Address: grpcAddress, + }, err +} + +// Stop stops the process based on the process id +func Stop(logger *logger.Logger, pid int) error { + proc, err := os.FindProcess(pid) + if err != nil { + // Process not found (it is already killed) we treat as a success + return nil + } + + return proc.Signal(os.Interrupt) +} + +// Attach assumes caller knows all the details about the process +// so it just tries to attach to existing PID and returns Process +// itself for awaiter +func Attach(logger *logger.Logger, pid int) (*Info, error) { + proc, err := os.FindProcess(pid) + if err != nil { + // Process not found we are unable to attach + return nil, err + } + + // We are attaching to an existing process, + // address is already known to caller + return &Info{ + PID: proc.Pid, + Process: proc, + }, nil +} + +func getGrpcAddress(config *Config) (string, error) { + if config.Transport == transportUnix && runtime.GOOS != "windows" { + getGrpcUnixAddress() + } + + return getGrpcTCPAddress(config.MinPortNumber, config.MaxPortNumber) +} + +func getGrpcUnixAddress() (string, error) { + for i := 0; i <= 100; i++ { + name := randSocketName() + if fi, err := os.Stat(name); err != nil || fi == nil { + return name, nil + } + } + + return "", fmt.Errorf("free unix socket not found, retry limit reached") +} + +func getGrpcTCPAddress(minPort, maxPort int) (string, error) { + if minPort == 0 { + minPort = DefaultMinPort + } + + if maxPort == 0 { + maxPort = DefaultMaxPort + } + + jitter := (maxPort - minPort) / 3 + if jitter > 0 { + mrand.Seed(time.Now().UnixNano()) + minPort += mrand.Intn(jitter) + } + + for port := minPort; port <= maxPort; port++ { + desiredAddress := fmt.Sprintf("127.0.0.1:%d", port) + listener, _ := net.Listen("tcp", desiredAddress) + if listener != nil { + // we found available port + listener.Close() + return desiredAddress, nil + } + } + + return "", fmt.Errorf("port not found in range %d-%d", minPort, maxPort) +} + +func randSocketName() string { + randBytes := make([]byte, 10) + rand.Read(randBytes) + return filepath.Join(os.TempDir(), hex.EncodeToString(randBytes)+".sock") +} + +func isInt32(val int) bool { + return val >= 0 && val <= math.MaxInt32 +} + +func pushCredentials(w io.Writer, c *Creds) error { + if c == nil { + return nil + } + + credbytes, err := yaml.Marshal(c) + if err != nil { + return err + } + + _, err = w.Write(credbytes) + + // this gives beat with grpc a bit of time to spin up a goroutine and start a server. + // should be ok until we come up with more clever solution. + // Issue: https://github.com/elastic/beats/v7/issues/15634 + <-time.After(1500 * time.Millisecond) + return err +} diff --git a/x-pack/agent/pkg/core/plugin/retry/config.go b/x-pack/agent/pkg/core/plugin/retry/config.go new file mode 100644 index 00000000000..a9487792a17 --- /dev/null +++ b/x-pack/agent/pkg/core/plugin/retry/config.go @@ -0,0 +1,30 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
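A functional note on getGrpcAddress in process.go above: the unix-socket branch calls getGrpcUnixAddress() but discards both return values, so a `unix` transport silently falls back to TCP. The intent was presumably (a sketch against the current signatures):

	func getGrpcAddress(config *Config) (string, error) {
		if config.Transport == transportUnix && runtime.GOOS != "windows" {
			return getGrpcUnixAddress()
		}
		return getGrpcTCPAddress(config.MinPortNumber, config.MaxPortNumber)
	}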
+
+package retry
+
+import "time"
+
+const (
+	defaultRetriesCount = 3
+	defaultDelay        = 30 * time.Second
+	defaultMaxDelay     = 5 * time.Minute
+)
+
+// Config is the configuration of a retry strategy.
+type Config struct {
+	// Enabled determines whether retry is possible. Default is false.
+	Enabled bool `yaml:"enabled" config:"enabled"`
+	// RetriesCount specifies the number of retries. Default is 3.
+	// A retry count of 1 means the call is retried one time after one failure.
+	RetriesCount int `yaml:"retriesCount" config:"retriesCount"`
+	// Delay specifies the delay between retries. Default is 30s.
+	Delay time.Duration `yaml:"delay" config:"delay"`
+	// MaxDelay specifies the maximum delay between retries. Default is 5m.
+	MaxDelay time.Duration `yaml:"maxDelay" config:"maxDelay"`
+	// Exponential determines whether the delay grows with each retry attempt;
+	// the growth is linear: with a 30s delay and 3 retries the delays are
+	// 30s, 60s and 90s. Default is false.
+	Exponential bool `yaml:"exponential" config:"exponential"`
+}
diff --git a/x-pack/agent/pkg/core/plugin/retry/error.go b/x-pack/agent/pkg/core/plugin/retry/error.go
new file mode 100644
index 00000000000..b5ef3bda746
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/retry/error.go
@@ -0,0 +1,30 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package retry
+
+// Fatal is the interface an error implements in order to report whether or not
+// it is fatal, i.e. whether it should stop further retries.
+type Fatal interface {
+	Fatal() bool
+}
+
+// FatalError wraps an error and is always fatal.
+type FatalError struct {
+	error
+}
+
+// Fatal determines whether or not the error is fatal. The value receiver is
+// deliberate so that the FatalError values built below satisfy the Fatal interface.
+func (FatalError) Fatal() bool {
+	return true
+}
+
+// ErrorMakeFatal is a shorthand for making an error fatal.
+func ErrorMakeFatal(err error) error {
+	if err == nil {
+		return err
+	}
+
+	return FatalError{err}
+}
diff --git a/x-pack/agent/pkg/core/plugin/retry/retrystrategy.go b/x-pack/agent/pkg/core/plugin/retry/retrystrategy.go
new file mode 100644
index 00000000000..9036ef7be44
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/retry/retrystrategy.go
@@ -0,0 +1,110 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package retry
+
+import (
+	"time"
+
+	"github.com/elastic/beats/v7/libbeat/common/backoff"
+)
+
+// DoWithBackoff ignores the delay settings of the retry config and lets the
+// backoff decide how much time it needs between attempts.
+func DoWithBackoff(config *Config, b backoff.Backoff, fn func() error, fatalErrors ...error) error {
+	retryCount := getRetryCount(config)
+	var err error
+
+	for retryNo := 0; retryNo <= retryCount; retryNo++ {
+		err = fn()
+		if err == nil || isFatal(err, fatalErrors...) {
+			b.Reset()
+			return err
+		}
+
+		if retryNo < retryCount {
+			b.Wait()
+		}
+	}
+
+	return err
+}
+
+// Do runs the provided function in the manner specified in the retry configuration.
+func Do(config *Config, fn func() error, fatalErrors ...error) error {
+	retryCount := getRetryCount(config)
+	var err error
+
+	for retryNo := 0; retryNo <= retryCount; retryNo++ {
+		err = fn()
+		if err == nil {
+			return nil
+		}
+
+		if isFatal(err, fatalErrors...)
{ + return err + } + + if retryNo < retryCount { + <-time.After(getDelayDuration(config, retryNo)) + } + } + + return err +} + +func getRetryCount(config *Config) int { + if config == nil { + return defaultRetriesCount + } + + if !config.Enabled { + return 0 + } + + if config.RetriesCount > 0 { + return config.RetriesCount + } + + return defaultRetriesCount +} + +func getDelayDuration(config *Config, retryNo int) time.Duration { + delay := defaultDelay + + if config != nil { + if config.Delay > 0 { + delay = config.Delay + } + + if config.Exponential { + delay = time.Duration(delay.Nanoseconds() * int64(retryNo+1)) + } + } + + maxDelay := config.MaxDelay + if maxDelay == 0 { + maxDelay = defaultMaxDelay + } + if delay > maxDelay { + delay = maxDelay + } + return time.Duration(delay) +} + +// Error is fatal either if it implements Error interface and says so +// or if it is equal to one of the fatal values provided +func isFatal(err error, fatalErrors ...error) bool { + if fatalerr, ok := err.(Fatal); ok { + return fatalerr.Fatal() + } + + for _, e := range fatalErrors { + if e == err { + return true + } + } + + // What does not match criteria is considered transient + return false +} diff --git a/x-pack/agent/pkg/core/plugin/retry/retrystrategy_test.go b/x-pack/agent/pkg/core/plugin/retry/retrystrategy_test.go new file mode 100644 index 00000000000..6ad623702ad --- /dev/null +++ b/x-pack/agent/pkg/core/plugin/retry/retrystrategy_test.go @@ -0,0 +1,182 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package retry + +import ( + "errors" + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/common/backoff" +) + +func TestRetry(t *testing.T) { + type testCase struct { + Fn func() error + ExpectedExecutions int64 + IsErrExpected bool + Enabled bool + Exponential bool + } + + errFatal := errors.New("fatal") + var executions int64 + + testCases := map[string]testCase{ + "not-failing": testCase{Fn: func() error { executions++; return nil }, ExpectedExecutions: 1, Enabled: true}, + "failing": testCase{Fn: func() error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, + "fatal-by-enum": testCase{Fn: func() error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, + "fatal-by-iface": testCase{Fn: func() error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, + "not-fatal-by-iface": testCase{Fn: func() error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, + + "dis-not-failing": testCase{Fn: func() error { executions++; return nil }, ExpectedExecutions: 1, Enabled: false}, + "dis-failing": testCase{Fn: func() error { executions++; return errors.New("fail") }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, + "dis-fatal-by-enum": testCase{Fn: func() error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, + "dis-fatal-by-iface": testCase{Fn: func() error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, + "dis-not-fatal-by-iface": testCase{Fn: func() error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, + + "failing-exp": testCase{Fn: func() error { 
executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true, Exponential: true}, + } + + config := &Config{ + RetriesCount: 3, + Delay: 500 * time.Millisecond, + } + + for n, tc := range testCases { + testFn := tc.Fn + executions = 0 + config.Enabled = tc.Enabled + config.Exponential = tc.Exponential + + startTime := time.Now() + err := Do(config, testFn, errFatal) + + executionTime := time.Since(startTime) + minExecutionTime := getMinExecutionTime(config.Delay, tc.ExpectedExecutions, tc.Exponential) + maxExecutionTime := getMaxExecutionTime(config.Delay, tc.ExpectedExecutions, tc.Exponential) + if tc.ExpectedExecutions > 1 && (executionTime < minExecutionTime || executionTime > maxExecutionTime) { + t.Fatalf("[%s]: expecting execution time between %d-%d ns, got: %v", n, minExecutionTime, maxExecutionTime, executionTime) + } + + if (err == nil) == tc.IsErrExpected { + t.Fatalf("[%s]: expecting error, got: %v", n, err) + } + + if executions != tc.ExpectedExecutions { + t.Fatalf("[%s]: expecting %d executions, got: %d", n, tc.ExpectedExecutions, executions) + } + } +} + +func TestRetryWithBackoff(t *testing.T) { + type testCase struct { + Fn func() error + ExpectedExecutions int + IsErrExpected bool + Enabled bool + } + + errFatal := errors.New("fatal") + executions := 0 + + testCases := map[string]testCase{ + "not-failing": testCase{Fn: func() error { executions++; return nil }, ExpectedExecutions: 1, Enabled: true}, + "failing": testCase{Fn: func() error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, + "fatal-by-enum": testCase{Fn: func() error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, + "fatal-by-iface": testCase{Fn: func() error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, + "not-fatal-by-iface": testCase{Fn: func() error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, + } + + config := &Config{ + RetriesCount: 3, + Delay: 5000, + } + maxDelay := time.Duration(config.Delay) * time.Millisecond + + done := make(chan struct{}) + maxWaitTime := 200 * time.Millisecond + minWaitTime := 50 * time.Millisecond + backoff := backoff.NewEqualJitterBackoff(done, minWaitTime, maxWaitTime) + + for n, tc := range testCases { + testFn := tc.Fn + executions = 0 + config.Enabled = tc.Enabled + + startTime := time.Now() + err := DoWithBackoff(config, backoff, testFn, errFatal) + + executionTime := time.Since(startTime) + minExecTime := getBackoffMinTime(minWaitTime, maxWaitTime, tc.ExpectedExecutions) + if tc.ExpectedExecutions > 1 && (executionTime < minExecTime || executionTime > maxDelay) { + t.Fatalf("[%s]: expecting execution time between %d-%d ns, got: %v", n, minExecTime, maxDelay, executionTime) + } + + if (err == nil) == tc.IsErrExpected { + t.Fatalf("[%s]: expecting error, got: %v", n, err) + } + + if executions != tc.ExpectedExecutions { + t.Fatalf("[%s]: expecting %d executions, got: %d", n, tc.ExpectedExecutions, executions) + } + } +} + +type ErrFatal struct{ error } + +func (ErrFatal) Fatal() bool { + return true +} + +type ErrNotFatal struct{ error } + +func (ErrNotFatal) Fatal() bool { + return false +} + +func getMaxExecutionTime(delayDuration time.Duration, executions int64, exponential bool) time.Duration { + delay := delayDuration.Nanoseconds() + execTime := (executions)*delay + (delay / 2) + if exponential { + execTime = 0 + for i := 
int64(0); i < executions; i++ { + execTime += i * delay + } + execTime += (delay / 2) * executions + } + + return time.Duration(execTime) +} + +func getMinExecutionTime(delayDuration time.Duration, executions int64, exponential bool) time.Duration { + delay := delayDuration.Nanoseconds() + execTime := (executions-1)*delay - (delay / 2) + if exponential { + execTime = 0 + for i := int64(0); i < executions; i++ { + execTime += i * delay + } + execTime -= (delay / 2) + } + + if execTime < 0 { + execTime = 0 + } + return time.Duration(execTime) +} + +func getBackoffMinTime(delay time.Duration, maxWaitTime time.Duration, executions int) time.Duration { + var duration time.Duration + for i := 1; i < executions; i++ { + duration += delay + delay *= 2 + if delay > maxWaitTime { + delay = maxWaitTime + } + } + + return duration +} diff --git a/x-pack/agent/pkg/core/plugin/server/server.go b/x-pack/agent/pkg/core/plugin/server/server.go new file mode 100644 index 00000000000..cd7eb46e4ae --- /dev/null +++ b/x-pack/agent/pkg/core/plugin/server/server.go @@ -0,0 +1,93 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package server + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net" + "os" + + rpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig/grpc" +) + +const ( + serverAddressKey = "SERVER_ADDRESS" +) + +// NewGrpcServer creates a server and pairs it with fleet. 
+// Reads secrets from the provided reader, registers the provided server
+// and starts listening on the negotiated address.
+func NewGrpcServer(secretsReader io.Reader, configServer grpc.ConfiguratorServer) error {
+	// get creds from agent
+	var cred *process.Creds
+	secrets, err := ioutil.ReadAll(secretsReader)
+	if err != nil {
+		return errors.New(err, "failed to retrieve secrets from provided input")
+	}
+
+	err = yaml.Unmarshal(secrets, &cred)
+	if err != nil {
+		return errors.New(err, "failed to parse secrets from provided input")
+	}
+
+	// setup grpc server
+	serverAddress, found := os.LookupEnv(serverAddressKey)
+	if !found {
+		return errors.New("server address not specified")
+	}
+
+	pair, err := tls.X509KeyPair(cred.Cert, cred.PK)
+	if err != nil {
+		return errors.New(err, "failed to load x509 key-pair")
+	}
+
+	// Create CA cert pool
+	certPool := x509.NewCertPool()
+	if ok := certPool.AppendCertsFromPEM(cred.CaCert); !ok {
+		return errors.New("failed to append client certs")
+	}
+
+	fmt.Printf("Listening at %s\n", serverAddress)
+	lis, err := net.Listen("tcp", serverAddress)
+	if err != nil {
+		return errors.New(err,
+			fmt.Sprintf("failed to start server: %v", serverAddress),
+			errors.TypeNetwork,
+			errors.M(errors.MetaKeyURI, serverAddress))
+	}
+
+	// Create the TLS credentials
+	serverCreds := credentials.NewTLS(&tls.Config{
+		ClientAuth:   tls.RequireAndVerifyClientCert,
+		Certificates: []tls.Certificate{pair},
+		ClientCAs:    certPool,
+	})
+
+	// Create the gRPC server with the credentials
+	srv := rpc.NewServer(rpc.Creds(serverCreds))
+
+	// Register the handler object
+	grpc.RegisterConfiguratorServer(srv, configServer)
+
+	// Serve and Listen
+	if err := srv.Serve(lis); err != nil {
+		return errors.New(err,
+			fmt.Sprintf("grpc serve error: %s", serverAddress),
+			errors.TypeNetwork,
+			errors.M(errors.MetaKeyURI, serverAddress))
+	}
+
+	return nil
+}
diff --git a/x-pack/agent/pkg/core/plugin/state/state.go b/x-pack/agent/pkg/core/plugin/state/state.go
new file mode 100644
index 00000000000..210517e35a8
--- /dev/null
+++ b/x-pack/agent/pkg/core/plugin/state/state.go
@@ -0,0 +1,25 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package state
+
+import "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/process"
+
+// Status describes the current status of the application process.
+type Status int
+
+const (
+	// Stopped describes an application that is not running.
+	Stopped Status = iota
+	// Running signals that the application is currently running.
+	Running
+	// Restarting means the process crashed and is being started again.
+	Restarting
+)
+
+// State wraps the process state and application status.
+type State struct {
+	ProcessInfo *process.Info
+	Status      Status
+}
diff --git a/x-pack/agent/pkg/core/remoteconfig/config.go b/x-pack/agent/pkg/core/remoteconfig/config.go
new file mode 100644
index 00000000000..5e9b879d299
--- /dev/null
+++ b/x-pack/agent/pkg/core/remoteconfig/config.go
@@ -0,0 +1,31 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
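For orientation, a hypothetical process-side entry point that wires NewGrpcServer together with the generated Configurator service. The configurator type and its trivial handlers are mine, assuming the agent passes Creds on stdin and the listen address via SERVER_ADDRESS:

	package main

	import (
		"context"
		"log"
		"os"

		"github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/server"
		"github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig/grpc"
	)

	type configurator struct{}

	func (configurator) Config(ctx context.Context, req *grpc.ConfigRequest) (*grpc.ConfigResponse, error) {
		// apply req.GetConfig() to the running process here
		return &grpc.ConfigResponse{}, nil
	}

	func (configurator) Status(ctx context.Context, req *grpc.StatusRequest) (*grpc.StatusResponse, error) {
		return &grpc.StatusResponse{Status: "ok"}, nil
	}

	func main() {
		if err := server.NewGrpcServer(os.Stdin, configurator{}); err != nil {
			log.Fatal(err)
		}
	}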
+ +package remoteconfig + +import ( + "context" +) + +// Client for remote calls +type Client interface{} + +// ConfiguratorClient is the client connecting agent and a process +type ConfiguratorClient interface { + Config(ctx context.Context, config string) error + Close() error +} + +// ConnectionCreator describes a creator of connections. +// ConnectionCreator should be used in client vault to generate new connections. +type ConnectionCreator interface { + NewConnection(address ConnectionProvider) (Client, error) +} + +// ConnectionProvider is a basic provider everybody needs to implement +// in order to provide a valid connection. +// Minimal set of properties is: address +type ConnectionProvider interface { + Address() string +} diff --git a/x-pack/agent/pkg/core/remoteconfig/grpc/configclient.go b/x-pack/agent/pkg/core/remoteconfig/grpc/configclient.go new file mode 100644 index 00000000000..d211643ef42 --- /dev/null +++ b/x-pack/agent/pkg/core/remoteconfig/grpc/configclient.go @@ -0,0 +1,93 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package grpc + +import ( + context "context" + "errors" + "time" + + grpc "google.golang.org/grpc" + rpc "google.golang.org/grpc" + + "github.com/elastic/beats/v7/libbeat/common/backoff" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig" +) + +var ( + // ErrNotGrpcClient is used when connection passed into a factory is not a grpc connection + ErrNotGrpcClient = errors.New("not a grpc client") + // ErrProviderNotProvided is used when provider passed into factory is not provided + ErrProviderNotProvided = errors.New("provider not provided") + // ErrProviderIncorrectType is used when provider passed into factory does not implement grpcConnectionProvided + ErrProviderIncorrectType = errors.New("provided provider has incorrect type") +) + +// CreateConfiguratorClient creates a new client from a connection passed in. +// This wraps generated grpc implementation so the change of the underlying +// technology is just the change of the namespace. +func CreateConfiguratorClient(conn interface{}, delay, maxDelay time.Duration) (remoteconfig.ConfiguratorClient, error) { + grpcConn, ok := conn.(*rpc.ClientConn) + if !ok { + return nil, ErrNotGrpcClient + } + + var boff backoff.Backoff + done := make(chan struct{}) + + if delay > 0 && maxDelay > 0 { + boff = backoff.NewEqualJitterBackoff(done, delay, maxDelay) + } else { + // no retry strategy configured + boff = NewNoopBackoff() + } + + return &client{ + grpcConn: grpcConn, + client: NewConfiguratorClient(grpcConn), + backoff: boff, + done: done, + }, nil +} + +type client struct { + grpcConn *grpc.ClientConn + client ConfiguratorClient + backoff backoff.Backoff + done chan struct{} +} + +// Config performs grpc Config request. +func (c *client) Config(ctx context.Context, config string) error { + request := ConfigRequest{ + Config: string(config), + } + + _, err := c.client.Config(ctx, &request) + backoff.WaitOnError(c.backoff, err) + + return err +} + +// Status performs grpc Status request. +func (c *client) Status(ctx context.Context) (string, error) { + request := StatusRequest{} + res, err := c.client.Status(ctx, &request) + if err != nil { + return "", err + } + + return res.Status, nil +} + +// Close cleans up resources. 
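+// Closing the done channel below also releases any backoff that is currently waiting.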
+func (c *client) Close() error { + close(c.done) + return c.grpcConn.Close() +} + +func (c *client) Backoff() backoff.Backoff { + return c.backoff +} diff --git a/x-pack/agent/pkg/core/remoteconfig/grpc/connection_provider.go b/x-pack/agent/pkg/core/remoteconfig/grpc/connection_provider.go new file mode 100644 index 00000000000..19bf9fa9a88 --- /dev/null +++ b/x-pack/agent/pkg/core/remoteconfig/grpc/connection_provider.go @@ -0,0 +1,55 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package grpc + +import ( + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig" +) + +var _ remoteconfig.ConnectionProvider = (*ConnectionProvider)(nil) +var _ grpcConnectionProvider = (*ConnectionProvider)(nil) + +// ConnectionProvider is a connection provider for grpc connections +type ConnectionProvider struct { + address string + caCrt []byte + clientPrivateKey []byte + clientCert []byte +} + +type grpcConnectionProvider interface { + remoteconfig.ConnectionProvider + CA() []byte + Cert() []byte + PK() []byte + IsSecured() bool +} + +// NewConnectionProvider creates a new connection provider for grpc connections +func NewConnectionProvider(address string, caCrt []byte, clientPrivateKey, clientCert []byte) *ConnectionProvider { + return &ConnectionProvider{ + address: address, + caCrt: caCrt, + clientPrivateKey: clientPrivateKey, + clientCert: clientCert, + } +} + +// Address returns an address used for connecting to a client +func (c *ConnectionProvider) Address() string { return c.address } + +// CA returns a certificate authority associated with a connection +func (c *ConnectionProvider) CA() []byte { return c.caCrt } + +// Cert returns a public certificate associated with a connection +func (c *ConnectionProvider) Cert() []byte { return c.clientCert } + +// PK returns a private key associated with a connection +func (c *ConnectionProvider) PK() []byte { return c.clientPrivateKey } + +// IsSecured returns true if all bits for setting up a secure connection were provided +func (c *ConnectionProvider) IsSecured() bool { + return c.caCrt != nil && c.clientCert != nil && c.clientPrivateKey != nil +} diff --git a/x-pack/agent/pkg/core/remoteconfig/grpc/factory.go b/x-pack/agent/pkg/core/remoteconfig/grpc/factory.go new file mode 100644 index 00000000000..386a2b69ea7 --- /dev/null +++ b/x-pack/agent/pkg/core/remoteconfig/grpc/factory.go @@ -0,0 +1,79 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package grpc + +import ( + "crypto/tls" + "crypto/x509" + "time" + + "google.golang.org/grpc/credentials" + + rpc "google.golang.org/grpc" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig" +) + +// NewConnFactory creates a factory used to create connection. Hides implementation details +// of the underlying connections. 
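+//
+// Typical use (a sketch; the PEM material would come from the authority package):
+//
+//	factory := NewConnFactory(100*time.Millisecond, 5*time.Second)
+//	provider := NewConnectionProvider("127.0.0.1:10001", caPEM, keyPEM, certPEM)
+//	client, err := factory.NewConnection(provider)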
+func NewConnFactory(backoffDelay, backoffMaxDelay time.Duration) remoteconfig.ConnectionCreator { + return &connectionFactory{ + backoffDelay: backoffDelay, + backoffMaxDelay: backoffMaxDelay, + } +} + +type connectionFactory struct { + backoffDelay time.Duration + backoffMaxDelay time.Duration +} + +// NewConnection creates a connection +func (c *connectionFactory) NewConnection(provider remoteconfig.ConnectionProvider) (remoteconfig.Client, error) { + if provider == nil { + return nil, ErrProviderNotProvided + } + + grpcProvider, ok := provider.(grpcConnectionProvider) + if !ok { + return nil, ErrProviderIncorrectType + } + + if !grpcProvider.IsSecured() { + conn, err := rpc.Dial(provider.Address(), rpc.WithInsecure()) + if err != nil { + return nil, err + } + + return CreateConfiguratorClient(conn, c.backoffDelay, c.backoffMaxDelay) + } + + // Load client certificate + pair, err := tls.X509KeyPair(grpcProvider.Cert(), grpcProvider.PK()) + if err != nil { + return nil, errors.New(err, "creating client certificate pair") + } + + // Load Cert Auth + certPool := x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(grpcProvider.CA()); !ok { + return nil, errors.New("failed to append client certificate to CA pool") + } + + // Construct credentials + creds := credentials.NewTLS(&tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{pair}, + ServerName: "localhost", + }) + + conn, err := rpc.Dial(provider.Address(), rpc.WithTransportCredentials(creds)) + if err != nil { + return nil, err + } + + return CreateConfiguratorClient(conn, c.backoffDelay, c.backoffMaxDelay) +} diff --git a/x-pack/agent/pkg/core/remoteconfig/grpc/noop_backoff.go b/x-pack/agent/pkg/core/remoteconfig/grpc/noop_backoff.go new file mode 100644 index 00000000000..6599d106734 --- /dev/null +++ b/x-pack/agent/pkg/core/remoteconfig/grpc/noop_backoff.go @@ -0,0 +1,26 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package grpc + +import ( + "github.com/elastic/beats/v7/libbeat/common/backoff" +) + +// NoopBackoff implements a backoff interface without any wait. +// Used when no backoff is configured. +type NoopBackoff struct{} + +// NewNoopBackoff returns a new EqualJitter object. +func NewNoopBackoff() backoff.Backoff { + return &NoopBackoff{} +} + +// Reset resets the duration of the backoff. +func (b *NoopBackoff) Reset() {} + +// Wait block until either the timer is completed or channel is done. +func (b *NoopBackoff) Wait() bool { + return true +} diff --git a/x-pack/agent/pkg/core/remoteconfig/grpc/remote_config.pb.go b/x-pack/agent/pkg/core/remoteconfig/grpc/remote_config.pb.go new file mode 100644 index 00000000000..4048c8a0ee1 --- /dev/null +++ b/x-pack/agent/pkg/core/remoteconfig/grpc/remote_config.pb.go @@ -0,0 +1,310 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: remote_config.proto + +package grpc + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ConfigRequest struct { + Config string `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigRequest) Reset() { *m = ConfigRequest{} } +func (m *ConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ConfigRequest) ProtoMessage() {} +func (*ConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_16fc0d99571fe457, []int{0} +} + +func (m *ConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigRequest.Unmarshal(m, b) +} +func (m *ConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigRequest.Marshal(b, m, deterministic) +} +func (m *ConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigRequest.Merge(m, src) +} +func (m *ConfigRequest) XXX_Size() int { + return xxx_messageInfo_ConfigRequest.Size(m) +} +func (m *ConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigRequest proto.InternalMessageInfo + +func (m *ConfigRequest) GetConfig() string { + if m != nil { + return m.Config + } + return "" +} + +type ConfigResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigResponse) Reset() { *m = ConfigResponse{} } +func (m *ConfigResponse) String() string { return proto.CompactTextString(m) } +func (*ConfigResponse) ProtoMessage() {} +func (*ConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_16fc0d99571fe457, []int{1} +} + +func (m *ConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigResponse.Unmarshal(m, b) +} +func (m *ConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigResponse.Marshal(b, m, deterministic) +} +func (m *ConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigResponse.Merge(m, src) +} +func (m *ConfigResponse) XXX_Size() int { + return xxx_messageInfo_ConfigResponse.Size(m) +} +func (m *ConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigResponse proto.InternalMessageInfo + +type StatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_16fc0d99571fe457, []int{2} +} + +func (m *StatusRequest) XXX_Unmarshal(b []byte) 
error { + return xxx_messageInfo_StatusRequest.Unmarshal(m, b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return xxx_messageInfo_StatusRequest.Size(m) +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +type StatusResponse struct { + Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"Status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_16fc0d99571fe457, []int{3} +} + +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatusResponse.Unmarshal(m, b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return xxx_messageInfo_StatusResponse.Size(m) +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func init() { + proto.RegisterType((*ConfigRequest)(nil), "remoteconfig.ConfigRequest") + proto.RegisterType((*ConfigResponse)(nil), "remoteconfig.ConfigResponse") + proto.RegisterType((*StatusRequest)(nil), "remoteconfig.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "remoteconfig.StatusResponse") +} + +func init() { proto.RegisterFile("remote_config.proto", fileDescriptor_16fc0d99571fe457) } + +var fileDescriptor_16fc0d99571fe457 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2e, 0x4a, 0xcd, 0xcd, + 0x2f, 0x49, 0x8d, 0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0xe2, 0x81, 0x08, 0x42, 0xc4, 0x94, 0xd4, 0xb9, 0x78, 0x9d, 0xc1, 0xac, 0xa0, 0xd4, 0xc2, 0xd2, + 0xd4, 0xe2, 0x12, 0x21, 0x31, 0x2e, 0x36, 0x88, 0x94, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, + 0x94, 0xa7, 0x24, 0xc0, 0xc5, 0x07, 0x53, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0xaa, 0xc4, 0xcf, + 0xc5, 0x1b, 0x5c, 0x92, 0x58, 0x52, 0x5a, 0x0c, 0xd5, 0xaa, 0xa4, 0xc1, 0xc5, 0x07, 0x13, 0x80, + 0x28, 0x01, 0x19, 0x06, 0x11, 0x81, 0x19, 0x06, 0xe1, 0x19, 0xcd, 0x61, 0xe4, 0xe2, 0x81, 0x98, + 0x56, 0x5a, 0x94, 0x58, 0x92, 0x5f, 0x24, 0xe4, 0xca, 0xc5, 0x06, 0xe1, 0x0b, 0x49, 0xeb, 0x21, + 0xbb, 0x4f, 0x0f, 0xc5, 0x71, 0x52, 0x32, 0xd8, 0x25, 0xa1, 0x0e, 0x62, 0x00, 0x19, 0x03, 0xb1, + 0x01, 0xdd, 0x18, 0x14, 0x87, 0xa2, 0x1b, 0x83, 0xea, 0x68, 0x25, 0x86, 0x24, 0x36, 0x70, 0x48, + 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x06, 0x93, 0xe1, 0x10, 0x40, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ConfiguratorClient is the client API for Configurator service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ConfiguratorClient interface { + Config(ctx context.Context, in *ConfigRequest, opts ...grpc.CallOption) (*ConfigResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) +} + +type configuratorClient struct { + cc *grpc.ClientConn +} + +func NewConfiguratorClient(cc *grpc.ClientConn) ConfiguratorClient { + return &configuratorClient{cc} +} + +func (c *configuratorClient) Config(ctx context.Context, in *ConfigRequest, opts ...grpc.CallOption) (*ConfigResponse, error) { + out := new(ConfigResponse) + err := c.cc.Invoke(ctx, "/remoteconfig.Configurator/Config", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configuratorClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/remoteconfig.Configurator/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ConfiguratorServer is the server API for Configurator service. +type ConfiguratorServer interface { + Config(context.Context, *ConfigRequest) (*ConfigResponse, error) + Status(context.Context, *StatusRequest) (*StatusResponse, error) +} + +// UnimplementedConfiguratorServer can be embedded to have forward compatible implementations. 
+type UnimplementedConfiguratorServer struct { +} + +func (*UnimplementedConfiguratorServer) Config(ctx context.Context, req *ConfigRequest) (*ConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Config not implemented") +} +func (*UnimplementedConfiguratorServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} + +func RegisterConfiguratorServer(s *grpc.Server, srv ConfiguratorServer) { + s.RegisterService(&_Configurator_serviceDesc, srv) +} + +func _Configurator_Config_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfiguratorServer).Config(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remoteconfig.Configurator/Config", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfiguratorServer).Config(ctx, req.(*ConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Configurator_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfiguratorServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remoteconfig.Configurator/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfiguratorServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Configurator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "remoteconfig.Configurator", + HandlerType: (*ConfiguratorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Config", + Handler: _Configurator_Config_Handler, + }, + { + MethodName: "Status", + Handler: _Configurator_Status_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "remote_config.proto", +} diff --git a/x-pack/agent/pkg/crypto/io.go b/x-pack/agent/pkg/crypto/io.go new file mode 100644 index 00000000000..fca7cb6b188 --- /dev/null +++ b/x-pack/agent/pkg/crypto/io.go @@ -0,0 +1,378 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package crypto + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha512" + "encoding/binary" + "fmt" + "io" + + "github.com/pkg/errors" + "golang.org/x/crypto/pbkdf2" +) + +// Option is the default options used to generate the encrypt and decrypt writer. +// NOTE: the defined options need to be same for both the Reader and the writer. +type Option struct { + IterationsCount int + KeyLength int + SaltLength int + IVLength int + Generator bytesGen + + // BlockSize must be a factor of aes.BlockSize + BlockSize int +} + +// Validate the options for encoding and decoding values. 
+func (o *Option) Validate() error {
+	if o.IVLength == 0 {
+		return errors.New("IV length must be greater than 0")
+	}
+
+	if o.SaltLength == 0 {
+		return errors.New("Salt length must be greater than 0")
+	}
+
+	if o.IterationsCount == 0 {
+		return errors.New("IterationsCount must be greater than 0")
+	}
+
+	if o.KeyLength == 0 {
+		return errors.New("KeyLength must be greater than 0")
+	}
+
+	return nil
+}
+
+// DefaultOptions are the default options to use when creating the writer;
+// changing them might decrease the effectiveness of the encryption.
+var DefaultOptions = &Option{
+	IterationsCount: 10000,
+	KeyLength:       32,
+	SaltLength:      64,
+	IVLength:        12,
+	Generator:       randomBytes,
+	BlockSize:       bytes.MinRead,
+}
+
+// versionMagicHeader is the format version that will be added at the beginning of the header
+// and can be used to change how decryption works in future versions.
+var versionMagicHeader = []byte("v2")
+
+// Writer is an io.Writer implementation that encrypts any data it is asked to write. Before
+// writing any data to the wrapped writer it lazily writes a header with the information
+// needed to decrypt the data.
+type Writer struct {
+	option    *Option
+	password  []byte
+	writer    io.Writer
+	generator bytesGen
+
+	// internal
+	wroteHeader bool
+	err         error
+	gcm         cipher.AEAD
+	salt        []byte
+}
+
+type bytesGen func(int) ([]byte, error)
+
+// NewWriter returns a new encrypted Writer.
+func NewWriter(writer io.Writer, password []byte, option *Option) (*Writer, error) {
+	if err := option.Validate(); err != nil {
+		return nil, err
+	}
+
+	var g bytesGen
+	if option.Generator == nil {
+		g = randomBytes
+	} else {
+		g = option.Generator
+	}
+
+	salt, err := g(option.SaltLength)
+	if err != nil {
+		return nil, errors.Wrap(err, "fail to generate random password salt")
+	}
+
+	return &Writer{
+		option:    option,
+		password:  password,
+		generator: g,
+		writer:    writer,
+		salt:      salt,
+	}, nil
+}
+
+// NewWriterWithDefaults creates a new encryption writer with the default options.
+func NewWriterWithDefaults(writer io.Writer, password []byte) (*Writer, error) {
+	return NewWriter(writer, password, DefaultOptions)
+}
+
+// Write encrypts the byte slice to the destination writer. It returns any error encountered
+// while generating the header information or encoding the data.
+func (w *Writer) Write(b []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	if !w.wroteHeader {
+		w.wroteHeader = true
+
+		// Stretch the user provided key.
+		passwordBytes := stretchPassword(
+			w.password,
+			w.salt,
+			w.option.IterationsCount,
+			w.option.KeyLength,
+		)
+
+		// Select AES-256: because len(passwordBytes) == 32 bytes.
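+		// (aes.NewCipher picks AES-128/192/256 based on a 16/24/32 byte key)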
+ block, err := aes.NewCipher(passwordBytes) + if err != nil { + w.err = errors.Wrap(err, "could not create the cipher to encrypt") + return 0, w.err + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + w.err = errors.Wrap(err, "could not create the GCM to encrypt") + return 0, w.err + } + + w.gcm = aesgcm + + // Write headers + // VERSION|SALT|IV|PAYLOAD + header := new(bytes.Buffer) + header.Write(versionMagicHeader) + header.Write(w.salt) + + n, err := w.writer.Write(header.Bytes()) + if err != nil { + w.err = errors.Wrap(err, "fail to write encoding information header") + return 0, w.err + } + + if n != len(header.Bytes()) { + w.err = errors.New("written bytes do not match header size") + } + + if err := w.writeBlock(b); err != nil { + return 0, errors.Wrap(err, "fail to write block") + } + + return len(b), err + } + + if err := w.writeBlock(b); err != nil { + return 0, errors.Wrap(err, "fail to write block") + } + + return len(b), nil +} + +func (w *Writer) writeBlock(b []byte) error { + + // randomly generate the salt and the initialization vector, this information will be saved + // on disk in the file as part of the header + iv, err := w.generator(w.option.IVLength) + if err != nil { + w.err = errors.Wrap(err, "fail to generate random IV") + return w.err + } + + w.writer.Write(iv) + + encodedBytes := w.gcm.Seal(nil, iv, b, nil) + + l := make([]byte, 4) + binary.LittleEndian.PutUint32(l, uint32(len(encodedBytes))) + w.writer.Write(l) + + _, err = w.writer.Write(encodedBytes) + if err != nil { + return errors.Wrap(err, "fail to encode data") + } + + return nil +} + +// Reader implements the io.Reader interface and allow to decrypt bytes from the Writer. The reader +// will lazy read the header from the wrapper reader to initialize everything required to decrypt +// the data. +type Reader struct { + option *Option + password []byte + reader io.Reader + + // internal + err error + readHeader bool + gcm cipher.AEAD + iv []byte + buf []byte + eof bool +} + +// NewReader returns a new decryption reader. +func NewReader(reader io.Reader, password []byte, option *Option) (*Reader, error) { + if reader == nil { + return nil, errors.New("missing reader") + } + + return &Reader{ + option: option, + password: password, + reader: reader, + }, nil +} + +// NewReaderWithDefaults create a decryption io.Reader with the default options. +func NewReaderWithDefaults(reader io.Reader, password []byte) (*Reader, error) { + return NewReader(reader, password, DefaultOptions) +} + +// Read reads the bytes from a wrapped io.Reader and will decrypt the content on the fly. +func (r *Reader) Read(b []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + // Lets read the header. + if !r.readHeader { + r.readHeader = true + vLen := len(versionMagicHeader) + buf := make([]byte, vLen+r.option.SaltLength) + n, err := io.ReadAtLeast(r.reader, buf, len(buf)) + if err != nil { + r.err = errors.Wrap(err, "fail to read encoding header") + return n, err + } + + v := buf[0:vLen] + if !bytes.Equal(versionMagicHeader, v) { + return 0, fmt.Errorf("unknown version %s (%+v)", string(v), v) + } + + salt := buf[vLen : vLen+r.option.SaltLength] + + // Stretch the user provided key. 
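+		// the salt, iteration count and key length must match the writer's
+		// settings, otherwise GCM authentication fails on the first block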
+ passwordBytes := stretchPassword( + r.password, + salt, + r.option.IterationsCount, + r.option.KeyLength, + ) + + block, err := aes.NewCipher(passwordBytes) + if err != nil { + r.err = errors.Wrap(err, "could not create the cipher to decrypt the data") + return 0, r.err + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + r.err = errors.Wrap(err, "could not create the GCM to decrypt the data") + return 0, r.err + } + r.gcm = aesgcm + } + + return r.readTo(b) +} + +func (r *Reader) readTo(b []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if !r.eof { + if err := r.consumeBlock(); err != nil { + // We read all the blocks + if err == io.EOF || err == io.ErrUnexpectedEOF { + r.eof = true + } else { + r.err = err + return 0, err + } + } + } + + n := copy(b, r.buf) + r.buf = r.buf[n:] + + if r.eof && len(r.buf) == 0 { + r.err = io.EOF + } + + return n, r.err +} + +func (r *Reader) consumeBlock() error { + // Retrieve block information: + // - Initialization vector + // - Length of the block + iv, l, err := r.readBlockInfo() + if err != nil { + return err + } + + encodedBytes := make([]byte, l) + _, err = io.ReadAtLeast(r.reader, encodedBytes, int(l)) + if err != nil { + r.err = errors.Wrapf(err, "fail read the block of %d bytes", l) + } + + decodedBytes, err := r.gcm.Open(nil, iv, encodedBytes, nil) + if err != nil { + return errors.Wrap(err, "fail to decode bytes") + } + r.buf = append(r.buf[:], decodedBytes...) + + return nil +} + +func (r *Reader) readBlockInfo() ([]byte, int, error) { + buf := make([]byte, r.option.IVLength+4) + _, err := io.ReadAtLeast(r.reader, buf, len(buf)) + if err != nil { + return nil, 0, err + } + + iv := buf[0:r.option.IVLength] + l := binary.LittleEndian.Uint32(buf[r.option.IVLength:]) + + return iv, int(l), nil +} + +// Close will propagate the Close call to the wrapped reader. +func (r *Reader) Close() error { + a, ok := r.reader.(io.ReadCloser) + if ok { + return a.Close() + } + return nil +} + +func randomBytes(length int) ([]byte, error) { + r := make([]byte, length) + _, err := rand.Read(r) + + if err != nil { + return nil, err + } + + return r, nil +} + +func stretchPassword(password, salt []byte, c, kl int) []byte { + return pbkdf2.Key(password, salt, c, kl, sha512.New) +} diff --git a/x-pack/agent/pkg/crypto/io_test.go b/x-pack/agent/pkg/crypto/io_test.go new file mode 100644 index 00000000000..46847643010 --- /dev/null +++ b/x-pack/agent/pkg/crypto/io_test.go @@ -0,0 +1,195 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package crypto + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIO(t *testing.T) { + t.Run("encode and decode with the right password", func(t *testing.T) { + passwd := []byte("hello") + msg := []byte("bonjour la famille") + dest := new(bytes.Buffer) + + // Encode + w, err := NewWriterWithDefaults(dest, passwd) + require.NoError(t, err) + + n, err := w.Write(msg) + require.NoError(t, err) + require.Equal(t, len(msg), n) + + // Guard to make sure we have not the same bytes. 
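+		// i.e. the ciphertext must never contain the plaintext verbatim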
+ require.True(t, bytes.Index(dest.Bytes(), msg) == -1) + + r, err := NewReaderWithDefaults(dest, passwd) + require.NoError(t, err) + + content, err := ioutil.ReadAll(r) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) + + t.Run("Large single write", func(t *testing.T) { + passwd := []byte("hello") + msg, err := randomBytes(1327) + + require.NoError(t, err) + dest := new(bytes.Buffer) + + // Encode + w, err := NewWriterWithDefaults(dest, passwd) + require.NoError(t, err) + + n, err := io.Copy(w, bytes.NewBuffer(msg)) + require.NoError(t, err) + require.Equal(t, int64(len(msg)), n) + + // Guard to make sure we have not the same bytes. + require.True(t, bytes.Index(dest.Bytes(), msg) == -1) + + r, err := NewReaderWithDefaults(dest, passwd) + require.NoError(t, err) + + content, err := ioutil.ReadAll(r) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) + + t.Run("try to decode with the wrong password", func(t *testing.T) { + passwd := []byte("hello") + msg := []byte("bonjour la famille") + dest := new(bytes.Buffer) + + // Encode + w, err := NewWriterWithDefaults(dest, passwd) + require.NoError(t, err) + + n, err := w.Write(msg) + require.NoError(t, err) + require.Equal(t, len(msg), n) + + // Guard to make sure we have not the same bytes. + require.True(t, bytes.Index(dest.Bytes(), msg) == -1) + + r, err := NewReaderWithDefaults(dest, []byte("bad password")) + require.NoError(t, err) + + _, err = ioutil.ReadAll(r) + require.Error(t, err) + }) + + t.Run("Make sure that buffered IO works with the encoder", func(t *testing.T) { + passwd := []byte("hello") + msg, err := randomBytes(2048) + require.NoError(t, err) + dest := new(bytes.Buffer) + + // Encode + w, err := NewWriterWithDefaults(dest, passwd) + require.NoError(t, err) + + b := bufio.NewWriterSize(w, 100) + n, err := b.Write(msg) + require.NoError(t, err) + require.Equal(t, 2048, n) + // err = b.Flush() //force flush + require.NoError(t, err) + + require.True(t, len(dest.Bytes()) > 0) + + // Guard to make sure we have not the same bytes. + require.True(t, bytes.Index(dest.Bytes(), msg) == -1) + + r, err := NewReaderWithDefaults(dest, passwd) + require.NoError(t, err) + + content, err := ioutil.ReadAll(r) + require.NoError(t, err) + + require.Equal(t, msg, content) + }) + + t.Run("Make sure that buffered IO works with the decoder", func(t *testing.T) { + passwd := []byte("hello") + msg, err := randomBytes(2048) + require.NoError(t, err) + dest := new(bytes.Buffer) + + // Encode + w, err := NewWriterWithDefaults(dest, passwd) + require.NoError(t, err) + + n, err := w.Write(msg) + require.NoError(t, err) + require.True(t, n == 2048) + + // Guard to make sure we have not the same bytes. 
+		require.True(t, bytes.Index(dest.Bytes(), msg) == -1)
+
+		r, err := NewReaderWithDefaults(dest, passwd)
+		require.NoError(t, err)
+
+		b := bufio.NewReaderSize(r, 100)
+
+		content, err := ioutil.ReadAll(b)
+		require.NoError(t, err)
+
+		require.Equal(t, msg, content)
+	})
+
+	t.Run("Missing explicit version", func(t *testing.T) {
+		raw, err := randomBytes(2048)
+		require.NoError(t, err)
+		c := bytes.NewBuffer(raw)
+
+		r, err := NewReaderWithDefaults(c, []byte("bad password"))
+		require.NoError(t, err)
+
+		b := bufio.NewReaderSize(r, 100)
+
+		_, err = ioutil.ReadAll(b)
+		require.Error(t, err)
+	})
+
+	t.Run("works with multiple writes", func(t *testing.T) {
+		passwd := []byte("hello")
+
+		expected := []byte("hello world bonjour la famille")
+
+		dest := new(bytes.Buffer)
+
+		// Encode
+		w, err := NewWriterWithDefaults(dest, passwd)
+		require.NoError(t, err)
+
+		n, err := w.Write([]byte("hello world"))
+		require.NoError(t, err)
+		require.Equal(t, 11, n)
+
+		n, err = w.Write([]byte(" bonjour la famille"))
+		require.NoError(t, err)
+		require.Equal(t, 19, n)
+
+		// Guard to make sure the plaintext does not appear in the encrypted output.
+		require.True(t, bytes.Index(dest.Bytes(), expected) == -1)
+
+		r, err := NewReaderWithDefaults(dest, passwd)
+		require.NoError(t, err)
+
+		content, err := ioutil.ReadAll(r)
+		require.NoError(t, err)
+
+		require.Equal(t, expected, content)
+	})
+}
diff --git a/x-pack/agent/pkg/dir/discover.go b/x-pack/agent/pkg/dir/discover.go
new file mode 100644
index 00000000000..7ef889f23cd
--- /dev/null
+++ b/x-pack/agent/pkg/dir/discover.go
@@ -0,0 +1,32 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package dir
+
+import (
+	"path/filepath"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// DiscoverFiles takes a slice of wildcard patterns, tries to discover all the
+// matching files, and stops on the first error.
+func DiscoverFiles(patterns ...string) ([]string, error) {
+	files := make([]string, 0)
+	for _, pattern := range patterns {
+		f, err := filepath.Glob(pattern)
+		if err != nil {
+			return files, errors.New(err,
+				"error while reading the glob pattern",
+				errors.TypeFilesystem,
+				errors.M(errors.MetaKeyPath, pattern))
+		}
+
+		if len(f) > 0 {
+			files = append(files, f...)
+		}
+	}
+
+	return files, nil
+}
diff --git a/x-pack/agent/pkg/dir/discover_test.go b/x-pack/agent/pkg/dir/discover_test.go
new file mode 100644
index 00000000000..ad7cb21bf68
--- /dev/null
+++ b/x-pack/agent/pkg/dir/discover_test.go
@@ -0,0 +1,71 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package dir
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDiscover(t *testing.T) {
+	t.Run("support wildcards patterns", withFiles([]string{"hello", "helllooo"}, func(
+		dst string,
+		t *testing.T,
+	) {
+		r, err := DiscoverFiles(filepath.Join(dst, "hel*"))
+		require.NoError(t, err)
+		assert.Equal(t, 2, len(r))
+	}))
+
+	t.Run("support direct file", withFiles([]string{"hello", "helllooo"}, func(
+		dst string,
+		t *testing.T,
+	) {
+		r, err := DiscoverFiles(filepath.Join(dst, "hello"))
+		require.NoError(t, err)
+		assert.Equal(t, 1, len(r))
+	}))
+
+	t.Run("support direct file and pattern", withFiles([]string{"hello", "helllooo", "agent.yml"}, func(
+		dst string,
+		t *testing.T,
+	) {
+		r, err := DiscoverFiles(
+			filepath.Join(dst, "hel*"),
+			filepath.Join(dst, "agent.yml"),
+		)
+		require.NoError(t, err)
+		assert.Equal(t, 3, len(r))
+	}))
+
+	t.Run("returns no files when the pattern does not match", withFiles([]string{"hello", "helllooo", "agent.yml"}, func(
+		dst string,
+		t *testing.T,
+	) {
+		r, err := DiscoverFiles(filepath.Join(dst, "donotmatch.yml"))
+		require.NoError(t, err)
+		assert.Equal(t, 0, len(r))
+	}))
+}
+
+func withFiles(files []string, fn func(dst string, t *testing.T)) func(t *testing.T) {
+	return func(t *testing.T) {
+		tmp, err := ioutil.TempDir("", "watch")
+		require.NoError(t, err)
+		defer os.RemoveAll(tmp)
+
+		for _, file := range files {
+			path := filepath.Join(tmp, file)
+			empty, err := os.Create(path)
+			require.NoError(t, err)
+			empty.Close()
+		}
+
+		fn(tmp, t)
+	}
+}
diff --git a/x-pack/agent/pkg/filewatcher/watcher.go b/x-pack/agent/pkg/filewatcher/watcher.go
new file mode 100644
index 00000000000..cb64d6f2a60
--- /dev/null
+++ b/x-pack/agent/pkg/filewatcher/watcher.go
@@ -0,0 +1,260 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package filewatcher
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"io"
+	"os"
+	"sort"
+	"sync"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+// Comparer receives a file and the saved information about that file from the previous scan;
+// it returns true if the file has changed, along with the new state object to persist.
+type Comparer func(string, interface{}) (bool, interface{}, error)
+
+// DefaultComparer is the default comparer used by the watch.
+var DefaultComparer = ContentComparer
+
+type fileinfo struct {
+	watched bool
+	record  interface{}
+}
+
+// Status is returned when you call Update() on the file watcher; it contains information about
+// the unwatched, unchanged, and updated files, and whether an update is required.
+type Status struct {
+	Updated    []string
+	Unchanged  []string
+	Unwatched  []string
+	NeedUpdate bool
+}
+
+// Watch allows watching a set of files on disk and periodically scanning for changes since the
+// last time the files were seen. Access to the internal logbook is synchronized with a mutex.
+type Watch struct {
+	mu       sync.Mutex
+	log      *logger.Logger
+	logbook  map[string]fileinfo
+	comparer Comparer
+}
+
+// New returns a new Watch that will watch for file changes.
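+//
+// A minimal usage sketch (the config path is hypothetical, for illustration):
+//
+//   w, _ := filewatcher.New(nil, filewatcher.DefaultComparer)
+//   w.Watch("/etc/agent/agent.yml")
+//   status, _ := w.Update()
+//   if status.NeedUpdate {
+//       // reload the configuration from status.Updated
+//   }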
+func New(log *logger.Logger, f Comparer) (*Watch, error) {
+	var err error
+	if log == nil {
+		log, err = logger.New()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &Watch{log: log, logbook: make(map[string]fileinfo), comparer: f}, nil
+}
+
+// Watch adds a new file to the list of files to watch on disk.
+// NOTE: If we already know the file, we keep the old record.
+func (w *Watch) Watch(file string) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	v := w.logbook[file]
+	v.watched = true
+	w.logbook[file] = v
+}
+
+// Cleanup removes all unwatched files from the logbook.
+func (w *Watch) Cleanup() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	var removed []string
+	for file, info := range w.logbook {
+		if !info.watched {
+			removed = append(removed, file)
+			delete(w.logbook, file)
+		}
+	}
+	return removed
+}
+
+// Update returns multiple lists: updated files, unchanged files, and unwatched files.
+// - Check if we are watching new files.
+// - Check if we unwatched some files.
+// - Check if watched files have changed.
+func (w *Watch) Update() (Status, error) {
+	var (
+		updated    []string
+		unchanged  []string
+		unwatched  []string
+		needUpdate bool
+		err        error
+	)
+
+	unwatched = w.Cleanup()
+
+	if len(unwatched) > 0 {
+		needUpdate = true
+	}
+
+	updated, unchanged, err = w.scan()
+	if err != nil {
+		return Status{}, err
+	}
+
+	if len(updated) > 0 {
+		needUpdate = true
+	}
+
+	return Status{
+		Updated:    updated,
+		Unchanged:  unchanged,
+		Unwatched:  unwatched,
+		NeedUpdate: needUpdate,
+	}, nil
+}
+
+// Reset marks all the files in the logbook as unwatched.
+func (w *Watch) Reset() {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	for file, info := range w.logbook {
+		info.watched = false
+		w.logbook[file] = info
+	}
+}
+
+// IsWatching returns true if the file is currently in the logbook.
+func (w *Watch) IsWatching(file string) bool {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	_, ok := w.logbook[file]
+	return ok
+}
+
+// Watched returns the sorted list of watched files.
+func (w *Watch) Watched() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watched := make([]string, 0, len(w.logbook))
+	for file := range w.logbook {
+		watched = append(watched, file)
+	}
+
+	sort.Strings(watched)
+	return watched
+}
+
+// Unwatch removes a file from the list of files to track.
+func (w *Watch) Unwatch(file string) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	delete(w.logbook, file)
+}
+
+// scan goes through the list of files and verifies if they have been modified since we last
+// checked them.
+func (w *Watch) scan() (modifiedFiles []string, unchanged []string, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	for file, info := range w.logbook {
+		change, newRecord, err := w.comparer(file, info.record)
+		if err != nil {
+			return []string{}, []string{}, err
+		}
+
+		if change {
+			info.record = newRecord
+			w.logbook[file] = info
+			modifiedFiles = append(modifiedFiles, file)
+		} else {
+			unchanged = append(unchanged, file)
+		}
+	}
+
+	return modifiedFiles, unchanged, nil
+}
+
+// Invalidate invalidates the current cache.
+func (w *Watch) Invalidate() {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.logbook = make(map[string]fileinfo)
+}
+
+type record struct {
+	info     os.FileInfo
+	checksum []byte
+}
+
+// ContentComparer uses the last modified date of the file and a checksum of its content to
+// know if a file is different and must be processed again.
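+//
+// The check is two-staged: when the modification time is unchanged the file is assumed
+// identical and its content is never read; otherwise a SHA-256 checksum of the content
+// decides whether the file really changed.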
+func ContentComparer(file string, r interface{}) (bool, interface{}, error) { + stat, err := os.Stat(file) + if err != nil { + return false, nil, errors.New(err, + "could not get information about the file", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, file)) + } + + // We never saw the file before. + if r == nil { + checksum, err := checksum(file) + if err != nil { + return false, nil, err + } + return true, record{info: stat, checksum: checksum}, nil + } + + // We already saw the file. + fileRecord := r.(record) + + // If the modification time is the same, we assume nothing was changed on disk. + if stat.ModTime().Sub(fileRecord.info.ModTime()) == 0 { + return false, fileRecord, nil + } + + checksum, err := checksum(file) + if err != nil { + return false, nil, err + } + + // content is the same, no change. + if bytes.Equal(checksum, fileRecord.checksum) { + return false, fileRecord, nil + } + + return true, record{info: stat, checksum: checksum}, nil +} + +func checksum(file string) ([]byte, error) { + // Mod time was changed on on the file, now lets looks at the content of the file. + f, err := os.Open(file) + if err != nil { + return nil, errors.New(err, + "could not open file", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, file)) + } + + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return nil, errors.New(err, + "could not generate checksum", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, file)) + } + return h.Sum(nil), nil +} diff --git a/x-pack/agent/pkg/filewatcher/watcher_test.go b/x-pack/agent/pkg/filewatcher/watcher_test.go new file mode 100644 index 00000000000..140376ca197 --- /dev/null +++ b/x-pack/agent/pkg/filewatcher/watcher_test.go @@ -0,0 +1,228 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package filewatcher + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWatch(t *testing.T) { + t.Run("no files are watched", withWatch(func(t *testing.T, w *Watch) { + r, u, err := w.scan() + require.NoError(t, err) + assert.Equal(t, 0, len(r)) + assert.Equal(t, 0, len(u)) + })) + + t.Run("newly added files are discovered", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path := filepath.Join(tmp, "hello.txt") + empty, err := os.Create(path) + require.NoError(t, err) + empty.Close() + + // Register the file to watch. + w.Watch(path) + + r, _, err := w.scan() + require.NoError(t, err) + assert.Equal(t, 1, len(r)) + assert.Equal(t, r[0], path) + })) + + t.Run("ignore old files", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path := filepath.Join(tmp, "hello.txt") + empty, err := os.Create(path) + require.NoError(t, err) + empty.Close() + + // Register the file to watch. 
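+		// (Watch only records the path in the logbook; changes are detected
+		// lazily on the next scan/Update call, not via OS-level notifications.)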
+ w.Watch(path) + + r, u, err := w.scan() + require.NoError(t, err) + assert.Equal(t, 1, len(r)) + assert.Equal(t, r[0], path) + assert.Equal(t, 0, len(u)) + + r, u, err = w.scan() + require.NoError(t, err) + assert.Equal(t, 0, len(r)) + assert.Equal(t, 1, len(u)) + })) + + t.Run("can unwatch a watched file", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path := filepath.Join(tmp, "hello.txt") + empty, err := os.Create(path) + require.NoError(t, err) + empty.Close() + + // Register the file to watch. + w.Watch(path) + + // Initiall found + r, u, err := w.scan() + require.NoError(t, err) + assert.Equal(t, 1, len(r)) + assert.Equal(t, r[0], path) + assert.Equal(t, 0, len(u)) + + // Should not be returned since it's not modified. + r, u, err = w.scan() + require.NoError(t, err) + assert.Equal(t, 0, len(r)) + assert.Equal(t, 1, len(u)) + + // Unwatch the file + w.Unwatch(path) + + // Add new content to the file. + ioutil.WriteFile(path, []byte("heeeelo"), 0644) + + // Should not find the file. + r, u, err = w.scan() + require.NoError(t, err) + assert.Equal(t, 0, len(r)) + assert.Equal(t, 0, len(u)) + })) + + t.Run("can returns the list of watched files", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path := filepath.Join(tmp, "hello.txt") + empty, err := os.Create(path) + require.NoError(t, err) + empty.Close() + + // Register the file to watch. + w.Watch(path) + + assert.Equal(t, 1, len(w.Watched())) + assert.Equal(t, path, w.Watched()[0]) + assert.True(t, w.IsWatching(path)) + })) + + t.Run("update returns updated, unchanged and watched files", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path1 := filepath.Join(tmp, "hello-1.txt") + empty, err := os.Create(path1) + require.NoError(t, err) + empty.Close() + + // Register the file to watch. + w.Watch(path1) + + path2 := filepath.Join(tmp, "hello-2.txt") + empty, err = os.Create(path2) + require.NoError(t, err) + empty.Close() + + w.Watch(path2) + + path3 := filepath.Join(tmp, "hello-3.txt") + empty, err = os.Create(path3) + require.NoError(t, err) + empty.Close() + + w.Watch(path3) + + // Set initial state + w.Update() + + // Reset watched files. + w.Reset() + + // readd files + w.Watch(path2) + w.Watch(path3) + + // Try as much as possible to have content on disk. + <-time.After(1 * time.Second) + // Add new content to the file. 
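+		// (Appending after the one-second pause guards against coarse
+		// file-system timestamp granularity: ContentComparer short-circuits
+		// on an unchanged ModTime, so the append must land on a later one.)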
+ f, err := os.OpenFile(path3, os.O_APPEND|os.O_WRONLY, 0600) + require.NoError(t, err) + f.Write([]byte("more-hello")) + require.NoError(t, f.Sync()) + f.Close() + + s, _ := w.Update() + + require.Equal(t, 1, len(s.Updated)) + require.Equal(t, 1, len(s.Unchanged)) + require.Equal(t, 1, len(s.Unwatched)) + + require.True(t, s.NeedUpdate) + + assert.Equal(t, path1, s.Unwatched[0]) + assert.Equal(t, path3, s.Updated[0]) + assert.Equal(t, path2, s.Unchanged[0]) + })) + + t.Run("should cleanup files that disapear", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path1 := filepath.Join(tmp, "hello.txt") + empty, err := os.Create(path1) + require.NoError(t, err) + empty.Close() + + w.Watch(path1) + require.True(t, w.IsWatching(path1)) + w.Reset() + w.Cleanup() + require.False(t, w.IsWatching(path1)) + })) + + t.Run("should allow to invalidate the cache ", withWatch(func(t *testing.T, w *Watch) { + tmp, err := ioutil.TempDir("", "watch") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + path1 := filepath.Join(tmp, "hello.txt") + empty, err := os.Create(path1) + require.NoError(t, err) + empty.Close() + + w.Watch(path1) + require.True(t, w.IsWatching(path1)) + w.Invalidate() + require.True(t, len(w.Watched()) == 0) + })) +} + +func withWatch(fn func(t *testing.T, w *Watch)) func(*testing.T) { + return func(t *testing.T) { + w, err := New(nil, DefaultComparer) + if !assert.NoError(t, err) { + return + } + fn(t, w) + } +} diff --git a/x-pack/agent/pkg/fleetapi/ack_cmd.go b/x-pack/agent/pkg/fleetapi/ack_cmd.go new file mode 100644 index 00000000000..206a2171b33 --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/ack_cmd.go @@ -0,0 +1,106 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" +) + +const ackPath = "/api/ingest_manager/fleet/agents/%s/acks" + +// AckRequest consists of multiple actions acked to fleet ui. +// POST /agents/{agentId}/acks +// Authorization: ApiKey {AgentAccessApiKey} +// { +// "action_ids": ["id1"] +// } +type AckRequest struct { + AgentID string `json:"agent_id"` + Actions []string `json:"action_ids"` +} + +// Validate validates the enrollment request before sending it to the API. +func (e *AckRequest) Validate() error { + return nil +} + +// AckResponse is the response send back from the server. +// 200 +// { +// "action": "acks", +// "success": true +// } +type AckResponse struct { + Action string `json:"action"` + Success bool `json:"success"` +} + +// Validate validates the response send from the server. +func (e *AckResponse) Validate() error { + return nil +} + +// AckCmd is a fleet API command. +type AckCmd struct { + client clienter + info agentInfo +} + +// NewAckCmd creates a new api command. +func NewAckCmd(info agentInfo, client clienter) *AckCmd { + return &AckCmd{ + client: client, + info: info, + } +} + +// Execute ACK of actions to the Fleet. 
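+//
+// A minimal sketch of acking a single action (the id and variable names are
+// illustrative, not part of this API):
+//
+//   cmd := fleetapi.NewAckCmd(agentInfo, client)
+//   req := &fleetapi.AckRequest{Actions: []string{"action-id-1"}}
+//   resp, err := cmd.Execute(ctx, req)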
+func (e *AckCmd) Execute(ctx context.Context, r *AckRequest) (*AckResponse, error) { + if err := r.Validate(); err != nil { + return nil, err + } + + b, err := json.Marshal(r) + if err != nil { + return nil, errors.New(err, + "fail to encode the ack request", + errors.TypeUnexpected) + } + + ap := fmt.Sprintf(ackPath, e.info.AgentID()) + resp, err := e.client.Send(ctx, "POST", ap, nil, nil, bytes.NewBuffer(b)) + if err != nil { + return nil, errors.New(err, + "fail to ack to fleet", + errors.TypeNetwork, + errors.M(errors.MetaKeyURI, ap)) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, extract(resp.Body) + } + + ackResponse := &AckResponse{} + decoder := json.NewDecoder(resp.Body) + if err := decoder.Decode(ackResponse); err != nil { + return nil, errors.New(err, + "fail to decode ack response", + errors.TypeNetwork, + errors.M(errors.MetaKeyURI, ap)) + } + + if err := ackResponse.Validate(); err != nil { + return nil, err + } + + return ackResponse, nil +} diff --git a/x-pack/agent/pkg/fleetapi/ack_cmd_test.go b/x-pack/agent/pkg/fleetapi/ack_cmd_test.go new file mode 100644 index 00000000000..7495d60dd0e --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/ack_cmd_test.go @@ -0,0 +1,76 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAck(t *testing.T) { + const withAPIKey = "secret" + agentInfo := &agentinfo{} + + t.Run("Test ack roundtrip", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +{ + "action": "ack", + "success": true +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/acks", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + + responses := struct { + ActionIDs []string `json:"action_ids"` + }{} + + decoder := json.NewDecoder(r.Body) + defer r.Body.Close() + + err := decoder.Decode(&responses) + require.NoError(t, err) + + require.Equal(t, 1, len(responses.ActionIDs)) + + id := responses.ActionIDs[0] + require.Equal(t, "my-id", id) + + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + action := &ActionPolicyChange{ + ActionID: "my-id", + ActionType: "POLICY_CHANGE", + Policy: map[string]interface{}{ + "id": "policy_id", + }, + } + + cmd := NewAckCmd(&agentinfo{}, client) + + request := AckRequest{ + Actions: []string{ + action.ID(), + }, + } + + r, err := cmd.Execute(context.Background(), &request) + require.NoError(t, err) + require.True(t, r.Success) + require.Equal(t, "ack", r.Action) + }, + )) +} diff --git a/x-pack/agent/pkg/fleetapi/action.go b/x-pack/agent/pkg/fleetapi/action.go new file mode 100644 index 00000000000..9d0e152bceb --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/action.go @@ -0,0 +1,132 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package fleetapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// Action is the base interface for all the actions implemented by the fleet API.
+type Action interface {
+	fmt.Stringer
+	Type() string
+	ID() string
+}
+
+// ActionUnknown is an action that is not known to the current version of the Agent; we do not
+// want to fail at parsing time, so we report or ignore it at execution time instead.
+//
+// NOTE: We only keep the original type and the action id; the payload of the event is dropped.
+// We do this to make sure we do not leak any unwanted information.
+type ActionUnknown struct {
+	originalType string
+	ActionID     string
+	ActionType   string
+}
+
+// Type returns the type of the Action.
+func (a *ActionUnknown) Type() string {
+	return "UNKNOWN"
+}
+
+// ID returns the ID of the Action.
+func (a *ActionUnknown) ID() string {
+	return a.ActionID
+}
+
+func (a *ActionUnknown) String() string {
+	var s strings.Builder
+	s.WriteString("action_id: ")
+	s.WriteString(a.ActionID)
+	s.WriteString(", type: ")
+	s.WriteString(a.ActionType)
+	s.WriteString(" (original type: ")
+	s.WriteString(a.OriginalType())
+	s.WriteString(")")
+	return s.String()
+}
+
+// OriginalType returns the original type of the action as returned by the API.
+func (a *ActionUnknown) OriginalType() string {
+	return a.originalType
+}
+
+// ActionPolicyChange is a request to apply a new policy to the Agent.
+type ActionPolicyChange struct {
+	ActionID   string
+	ActionType string
+	Policy     map[string]interface{} `json:"policy"`
+}
+
+func (a *ActionPolicyChange) String() string {
+	var s strings.Builder
+	s.WriteString("action_id: ")
+	s.WriteString(a.ActionID)
+	s.WriteString(", type: ")
+	s.WriteString(a.ActionType)
+	return s.String()
+}
+
+// Type returns the type of the Action.
+func (a *ActionPolicyChange) Type() string {
+	return a.ActionType
+}
+
+// ID returns the ID of the Action.
+func (a *ActionPolicyChange) ID() string {
+	return a.ActionID
+}
+
+// Actions is a list of Actions to execute; the custom unmarshaller below allows decoding a
+// heterogeneous list of action types.
+type Actions []Action
+
+// UnmarshalJSON takes the raw representation of every action and tries to decode each of them.
+func (a *Actions) UnmarshalJSON(data []byte) error {
+	type r struct {
+		ActionType string          `json:"type"`
+		ActionID   string          `json:"id"`
+		Data       json.RawMessage `json:"data"`
+	}
+
+	var responses []r
+
+	if err := json.Unmarshal(data, &responses); err != nil {
+		return errors.New(err,
+			"fail to decode actions",
+			errors.TypeConfig)
+	}
+
+	actions := make([]Action, 0, len(responses))
+	var action Action
+
+	for _, response := range responses {
+		switch response.ActionType {
+		case "POLICY_CHANGE":
+			action = &ActionPolicyChange{
+				ActionID:   response.ActionID,
+				ActionType: response.ActionType,
+			}
+			if err := json.Unmarshal(response.Data, action); err != nil {
+				return errors.New(err,
+					"fail to decode POLICY_CHANGE action",
+					errors.TypeConfig)
+			}
+		default:
+			action = &ActionUnknown{
+				ActionID:     response.ActionID,
+				ActionType:   "UNKNOWN",
+				originalType: response.ActionType,
+			}
+		}
+		actions = append(actions, action)
+	}
+
+	*a = actions
+	return nil
+}
diff --git a/x-pack/agent/pkg/fleetapi/checkin_cmd.go b/x-pack/agent/pkg/fleetapi/checkin_cmd.go
new file mode 100644
index 00000000000..f1fd230732a
--- /dev/null
+++ b/x-pack/agent/pkg/fleetapi/checkin_cmd.go
@@ -0,0 +1,117 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements.
Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fleetapi
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+const checkinPath = "/api/ingest_manager/fleet/agents/%s/checkin"
+
+// CheckinRequest consists of multiple events reported to the Fleet UI.
+type CheckinRequest struct {
+	Events   []SerializableEvent    `json:"events"`
+	Metadata map[string]interface{} `json:"local_metadata,omitempty"`
+}
+
+// SerializableEvent is a representation of an event to be sent to the Fleet API via the checkin
+// endpoint. We are liberal in what we accept: an event only needs a type and must be
+// serializable to JSON.
+type SerializableEvent interface {
+	// Type returns the type of the event; this must be included in the serialized document.
+	Type() string
+
+	// Timestamp keeps track of when the event was created in the system.
+	Timestamp() time.Time
+
+	// Message is a human-readable string explaining what the event does; it is displayed
+	// as text in the UI.
+	Message() string
+}
+
+// Validate validates the checkin request before sending it to the API.
+func (e *CheckinRequest) Validate() error {
+	return nil
+}
+
+// CheckinResponse is the response sent back from the server; it contains all the actions that
+// need to be executed or proxied to running processes.
+type CheckinResponse struct {
+	Actions Actions `json:"actions"`
+	Success bool    `json:"success"`
+}
+
+// Validate validates the response sent from the server.
+func (e *CheckinResponse) Validate() error {
+	return nil
+}
+
+// CheckinCmd is a fleet API command.
+type CheckinCmd struct {
+	client clienter
+	info   agentInfo
+}
+
+type agentInfo interface {
+	AgentID() string
+}
+
+// NewCheckinCmd creates a new api command.
+func NewCheckinCmd(info agentInfo, client clienter) *CheckinCmd {
+	return &CheckinCmd{
+		client: client,
+		info:   info,
+	}
+}
+
+// Execute sends the checkin request for this Agent to Fleet.
+func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, error) {
+	if err := r.Validate(); err != nil {
+		return nil, err
+	}
+
+	b, err := json.Marshal(r)
+	if err != nil {
+		return nil, errors.New(err,
+			"fail to encode the checkin request",
+			errors.TypeUnexpected)
+	}
+
+	cp := fmt.Sprintf(checkinPath, e.info.AgentID())
+	resp, err := e.client.Send(ctx, "POST", cp, nil, nil, bytes.NewBuffer(b))
+	if err != nil {
+		return nil, errors.New(err,
+			"fail to checkin to fleet",
+			errors.TypeNetwork,
+			errors.M(errors.MetaKeyURI, cp))
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, extract(resp.Body)
+	}
+
+	checkinResponse := &CheckinResponse{}
+	decoder := json.NewDecoder(resp.Body)
+	if err := decoder.Decode(checkinResponse); err != nil {
+		return nil, errors.New(err,
+			"fail to decode checkin response",
+			errors.TypeNetwork,
+			errors.M(errors.MetaKeyURI, cp))
+	}
+
+	if err := checkinResponse.Validate(); err != nil {
+		return nil, err
+	}
+
+	return checkinResponse, nil
+}
diff --git a/x-pack/agent/pkg/fleetapi/checkin_cmd_test.go b/x-pack/agent/pkg/fleetapi/checkin_cmd_test.go
new file mode 100644
index 00000000000..3f92feac692
--- /dev/null
+++ b/x-pack/agent/pkg/fleetapi/checkin_cmd_test.go
@@ -0,0 +1,293 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements.
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type agentinfo struct{} + +func (*agentinfo) AgentID() string { return "id" } + +func TestCheckin(t *testing.T) { + const withAPIKey = "secret" + ctx := context.Background() + agentInfo := &agentinfo{} + + t.Run("Propagate any errors from the server", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +Something went wrong +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/checkin", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + cmd := NewCheckinCmd(agentInfo, client) + + request := CheckinRequest{} + + _, err := cmd.Execute(ctx, &request) + require.Error(t, err) + }, + )) + + t.Run("Checkin receives a PolicyChange", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +{ + "actions": [ + { + "type": "POLICY_CHANGE", + "id": "id1", + "data": { + "policy": { + "id": "policy-id", + "outputs": { + "default": { + "hosts": "https://localhost:9200" + } + }, + "streams": [ + { + "id": "string", + "type": "logs", + "path": "/var/log/hello.log", + "output": { + "use_output": "default" + } + } + ] + } + } + } + ], + "success": true +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/checkin", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + cmd := NewCheckinCmd(agentInfo, client) + + request := CheckinRequest{} + + r, err := cmd.Execute(ctx, &request) + require.NoError(t, err) + require.True(t, r.Success) + + require.Equal(t, 1, len(r.Actions)) + + // ActionPolicyChange + require.Equal(t, "id1", r.Actions[0].ID()) + require.Equal(t, "POLICY_CHANGE", r.Actions[0].Type()) + }, + )) + + t.Run("Checkin receives known and unknown action type", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +{ + "actions": [ + { + "type": "POLICY_CHANGE", + "id": "id1", + "data": { + "policy": { + "id": "policy-id", + "outputs": { + "default": { + "hosts": "https://localhost:9200" + } + }, + "streams": [ + { + "id": "string", + "type": "logs", + "path": "/var/log/hello.log", + "output": { + "use_output": "default" + } + } + ] + } + } + }, + { + "type": "WHAT_TO_DO_WITH_IT", + "id": "id2" + } + ], + "success": true +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/checkin", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + cmd := NewCheckinCmd(agentInfo, client) + + request := CheckinRequest{} + + r, err := cmd.Execute(ctx, &request) + require.NoError(t, err) + require.True(t, r.Success) + + require.Equal(t, 2, len(r.Actions)) + + // ActionPolicyChange + require.Equal(t, "id1", r.Actions[0].ID()) + require.Equal(t, 
"POLICY_CHANGE", r.Actions[0].Type()) + + // UnknownAction + require.Equal(t, "id2", r.Actions[1].ID()) + require.Equal(t, "UNKNOWN", r.Actions[1].Type()) + require.Equal(t, "WHAT_TO_DO_WITH_IT", r.Actions[1].(*ActionUnknown).OriginalType()) + }, + )) + + t.Run("When we receive no action", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +{ + "actions": [], + "success": true +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/checkin", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + cmd := NewCheckinCmd(agentInfo, client) + + request := CheckinRequest{} + + r, err := cmd.Execute(ctx, &request) + require.NoError(t, err) + require.True(t, r.Success) + + require.Equal(t, 0, len(r.Actions)) + }, + )) + + t.Run("Meta are sent", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +{ + "actions": [], + "success": true +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/checkin", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + type Request struct { + Metadata map[string]interface{} `json:"local_metadata"` + } + req := &Request{} + + content, err := ioutil.ReadAll(r.Body) + assert.NoError(t, err) + assert.NoError(t, json.Unmarshal(content, &req)) + + assert.Equal(t, 1, len(req.Metadata)) + v, found := req.Metadata["key"] + assert.True(t, found) + + intV, ok := v.(string) + assert.True(t, ok) + assert.Equal(t, "value", intV) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + meta := map[string]interface{}{ + "key": "value", + } + + cmd := NewCheckinCmd(agentInfo, client) + + request := CheckinRequest{Metadata: meta} + + r, err := cmd.Execute(ctx, &request) + require.NoError(t, err) + require.True(t, r.Success) + + require.Equal(t, 0, len(r.Actions)) + }, + )) + + t.Run("No meta are sent when not provided", withServerWithAuthClient( + func(t *testing.T) *http.ServeMux { + raw := ` +{ + "actions": [], + "success": true +} +` + mux := http.NewServeMux() + path := fmt.Sprintf("/api/ingest_manager/fleet/agents/%s/checkin", agentInfo.AgentID()) + mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { + req := make(map[string]interface{}) + + content, err := ioutil.ReadAll(r.Body) + assert.NoError(t, err) + assert.NoError(t, json.Unmarshal(content, &req)) + + _, found := req["key"] + assert.False(t, found) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, raw) + }, withAPIKey)) + return mux + }, withAPIKey, + func(t *testing.T, client clienter) { + cmd := NewCheckinCmd(agentInfo, client) + + request := CheckinRequest{} + + r, err := cmd.Execute(ctx, &request) + require.NoError(t, err) + require.True(t, r.Success) + + require.Equal(t, 0, len(r.Actions)) + }, + )) +} diff --git a/x-pack/agent/pkg/fleetapi/client.go b/x-pack/agent/pkg/fleetapi/client.go new file mode 100644 index 00000000000..d756c05c2d4 --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/client.go @@ -0,0 +1,121 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleetapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/kibana" + "github.com/elastic/beats/v7/x-pack/agent/pkg/release" +) + +type clienter interface { + Send( + ctx context.Context, + method string, + path string, + params url.Values, + headers http.Header, + body io.Reader, + ) (*http.Response, error) +} + +var baseRoundTrippers = func(rt http.RoundTripper) (http.RoundTripper, error) { + rt = NewFleetUserAgentRoundTripper(rt, release.Version()) + rt = kibana.NewEnforceKibanaVersionRoundTripper(rt, release.Version()) + return rt, nil +} + +func init() { + val, ok := os.LookupEnv("DEBUG_AGENT") + if ok && val == "1" { + fn := baseRoundTrippers + baseRoundTrippers = func(rt http.RoundTripper) (http.RoundTripper, error) { + rt, err := fn(rt) + if err != nil { + return nil, err + } + + l, err := logger.New() + if err != nil { + return nil, errors.New(err, "could not create the logger for debugging HTTP request") + } + + return kibana.NewDebugRoundTripper(rt, l), nil + } + } +} + +// NewAuthWithConfig returns a Kibana client that will: +// +// - Send the API Key on every HTTP request. +// - Ensure a minimun version of Kibana is required. +// - Send the Fleet User Agent on every HTTP request. +func NewAuthWithConfig(log *logger.Logger, apiKey string, cfg *kibana.Config) (*kibana.Client, error) { + return kibana.NewWithConfig(log, cfg, func(rt http.RoundTripper) (http.RoundTripper, error) { + rt, err := baseRoundTrippers(rt) + if err != nil { + return nil, err + } + + rt, err = NewFleetAuthRoundTripper(rt, apiKey) + if err != nil { + return nil, err + } + + return rt, nil + }) +} + +// NewWithRawConfig create a non authenticated clients. +func NewWithRawConfig(log *logger.Logger, config *config.Config) (*kibana.Client, error) { + return kibana.NewWithRawConfig(log, config, baseRoundTrippers) +} + +// NewWithConfig takes a Kibana configuration and create a kibana.client with the appropriate tripper. +func NewWithConfig(log *logger.Logger, cfg *kibana.Config) (*kibana.Client, error) { + return kibana.NewWithConfig(log, cfg, baseRoundTrippers) +} + +func extract(resp io.Reader) error { + // Lets try to extract a high level Kibana error. + e := &struct { + StatusCode int `json:"statusCode"` + Error string `json:"error"` + Message string `json:"message"` + }{} + + data, err := ioutil.ReadAll(resp) + if err != nil { + return errors.New(err, "fail to read original error") + } + + err = json.Unmarshal(data, e) + if err == nil { + // System errors doesn't return a message, fleet code can return a Message key which has more + // information. + if len(e.Message) == 0 { + return fmt.Errorf("Status code: %d, Kibana returned an error: %s", e.StatusCode, e.Error) + } + return fmt.Errorf( + "Status code: %d, Kibana returned an error: %s, message: %s", + e.StatusCode, + e.Error, + e.Message, + ) + } + + return fmt.Errorf("could not decode the response, raw response: %s", string(data)) +} diff --git a/x-pack/agent/pkg/fleetapi/client_test.go b/x-pack/agent/pkg/fleetapi/client_test.go new file mode 100644 index 00000000000..e60496b8673 --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/client_test.go @@ -0,0 +1,179 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/kibana" + "github.com/elastic/beats/v7/x-pack/agent/pkg/release" +) + +func TestHTTPClient(t *testing.T) { + ctx := context.Background() + + t.Run("Ensure we validate the remote Kibana version is higher or equal", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", authHandler(func(w http.ResponseWriter, r *http.Request) { + v := r.Header.Get("kbn-version") + assert.Equal(t, release.Version(), v) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + }, "abc123")) + return mux + }, func(t *testing.T, host string) { + cfg := &kibana.Config{ + Host: host, + } + + l, err := logger.New() + client, err := NewAuthWithConfig(l, "abc123", cfg) + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ message: "hello" }`, string(body)) + }, + )) + + t.Run("API Key is valid", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + }, "abc123")) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := kibana.NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewFleetAuthRoundTripper(wrapped, "abc123") + }) + + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ message: "hello" }`, string(body)) + }, + )) + + t.Run("API Key is not valid", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", authHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + }, "secret")) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := kibana.NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewFleetAuthRoundTripper(wrapped, "abc123") + }) + + require.NoError(t, err) + _, err = client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.Error(t, err) + }, + )) + + t.Run("Fleet user agent", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + require.Equal(t, r.Header.Get("User-Agent"), "Beat Agent v8.0.0") + }) + return mux + }, func(t *testing.T, host 
string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := kibana.NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewFleetUserAgentRoundTripper(wrapped, "8.0.0"), nil + }) + + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ message: "hello" }`, string(body)) + }, + )) + + t.Run("Fleet endpoint is not responding", func(t *testing.T) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": "127.0.0.0:7278", + }) + + timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + client, err := kibana.NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewFleetAuthRoundTripper(wrapped, "abc123") + }) + + _, err = client.Send(timeoutCtx, "GET", "/echo-hello", nil, nil, nil) + require.Error(t, err) + }) +} + +// NOTE(ph): Usually I would be against testing private methods as much as possible but in this +// case since we might deal with different format or error I make sense to test this method in +// isolation. +func TestExtract(t *testing.T) { + // The error before is returned when an exception or an internal occur in Kibana, they + // are not only generated by the Fleet app. + t.Run("standard high level kibana errors", func(t *testing.T) { + err := extract(strings.NewReader(`{ "statusCode": 500, "Internal Server Error"}`)) + assert.True(t, strings.Index(err.Error(), "500") > 0) + assert.True(t, strings.Index(err.Error(), "Internal Server Error") > 0) + }) + + t.Run("proxy or non json response", func(t *testing.T) { + err := extract(strings.NewReader("Bad Request")) + assert.True(t, strings.Index(err.Error(), "Bad Request") > 0) + }) + + t.Run("Fleet generated errors", func(t *testing.T) { + err := extract(strings.NewReader(`{"statusCode":400,"error":"Bad Request","message":"child \"metadata\" fails because [\"cal\" is not allowed]","validation":{"source":"payload","keys":["metadata.cal"]}}`)) + assert.True(t, strings.Index(err.Error(), "400") > 0) + assert.True(t, strings.Index(err.Error(), "Bad Request") > 0) + assert.True(t, strings.Index(err.Error(), "fails because") > 0) + }) +} diff --git a/x-pack/agent/pkg/fleetapi/custom_type.go b/x-pack/agent/pkg/fleetapi/custom_type.go new file mode 100644 index 00000000000..00532d159ab --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/custom_type.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "encoding/json" + "time" +) + +const timeFormat = time.RFC3339Nano + +// Time is a custom time that impose the serialization format. +type Time time.Time + +// MarshalJSON make sure that all the times are serialized with the RFC3339 format. +func (t Time) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Format(timeFormat)) +} diff --git a/x-pack/agent/pkg/fleetapi/custom_type_test.go b/x-pack/agent/pkg/fleetapi/custom_type_test.go new file mode 100644 index 00000000000..db38af6af88 --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/custom_type_test.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestTimeSerialized(t *testing.T) { + then := time.Date( + 2020, 1, 8, 6, 30, 00, 651387237, time.UTC) + + b, err := json.Marshal(Time(then)) + require.NoError(t, err) + + require.Equal(t, "\"2020-01-08T06:30:00.651387237Z\"", string(b)) +} diff --git a/x-pack/agent/pkg/fleetapi/enroll_cmd.go b/x-pack/agent/pkg/fleetapi/enroll_cmd.go new file mode 100644 index 00000000000..45ffde76c76 --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/enroll_cmd.go @@ -0,0 +1,212 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleetapi + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/hashicorp/go-multierror" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors" +) + +// EnrollType is the type of enrollment to do with the agent. +type EnrollType string + +const ( + // PermanentEnroll is default enrollment type, by default an Agent is permanently enroll to Agent. + PermanentEnroll = EnrollType("PERMANENT") +) + +var mapEnrollType = map[string]EnrollType{ + "PERMANENT": PermanentEnroll, +} + +var reverseMapEnrollType = make(map[EnrollType]string) + +func init() { + for k, v := range mapEnrollType { + reverseMapEnrollType[v] = k + } +} + +// UnmarshalJSON unmarshal an enrollment type. +func (p *EnrollType) UnmarshalJSON(b []byte) error { + s := string(b) + if len(s) <= 2 { + return errors.New("invalid enroll type received") + } + s = s[1 : len(s)-1] + v, ok := mapEnrollType[s] + if !ok { + return fmt.Errorf("value of '%s' is an invalid enrollment type, supported type is 'PERMANENT'", s) + } + + *p = v + + return nil +} + +// MarshalJSON marshal an enrollType. +func (p EnrollType) MarshalJSON() ([]byte, error) { + v, ok := reverseMapEnrollType[p] + if !ok { + return nil, errors.New("cannot serialize unknown type") + } + + return json.Marshal(v) +} + +// EnrollRequest is the data required to enroll the agent into Fleet. +// +// Example: +// POST /api/ingest_manager/fleet/agents/enroll +// { +// "type": "PERMANENT", +// "metadata": { +// "local": { "os": "macos"}, +// "user_provided": { "region": "us-east"} +// } +// } +type EnrollRequest struct { + EnrollAPIKey string `json:"-"` + Type EnrollType `json:"type"` + SharedID string `json:"sharedId,omitempty"` + Metadata Metadata `json:"metadata"` +} + +// Metadata is a all the metadata send or received from the agent. +type Metadata struct { + Local map[string]interface{} `json:"local"` + UserProvided map[string]interface{} `json:"user_provided"` +} + +// Validate validates the enrollment request before sending it to the API. +func (e *EnrollRequest) Validate() error { + var err error + + if len(e.EnrollAPIKey) == 0 { + err = multierror.Append(err, errors.New("missing enrollment api key")) + } + + if len(e.Type) == 0 { + err = multierror.Append(err, errors.New("missing enrollment type")) + } + + return err +} + +// EnrollResponse is the data received after enrolling an Agent into fleet. 
+// +// Example: +// { +// "action": "created", +// "success": true, +// "item": { +// "id": "a4937110-e53e-11e9-934f-47a8e38a522c", +// "active": true, +// "policy_id": "default", +// "type": "PERMANENT", +// "enrolled_at": "2019-10-02T18:01:22.337Z", +// "user_provided_metadata": {}, +// "local_metadata": {}, +// "actions": [], +// "access_api_key": "API_KEY" +// } +// } +type EnrollResponse struct { + Action string `json:"action"` + Success bool `json:"success"` + Item EnrollItemResponse `json:"item"` +} + +// EnrollItemResponse item response. +type EnrollItemResponse struct { + ID string `json:"id"` + Active bool `json:"active"` + PolicyID string `json:"policy_id"` + Type EnrollType `json:"type"` + EnrolledAt time.Time `json:"enrolled_at"` + UserProvidedMetadata map[string]interface{} `json:"user_provided_metadata"` + LocalMetadata map[string]interface{} `json:"local_metadata"` + Actions []interface{} `json:"actions"` + AccessAPIKey string `json:"access_api_key"` +} + +// Validate validates the response send from the server. +func (e *EnrollResponse) Validate() error { + var err error + + if len(e.Item.ID) == 0 { + err = multierror.Append(err, errors.New("missing ID")) + } + + if len(e.Item.Type) == 0 { + err = multierror.Append(err, errors.New("missing enrollment type")) + } + + if len(e.Item.AccessAPIKey) == 0 { + err = multierror.Append(err, errors.New("access api key is missing")) + } + + return err +} + +// EnrollCmd is the command to be executed to enroll an agent into Fleet. +type EnrollCmd struct { + client clienter +} + +// Execute enroll the Agent in the Fleet. +func (e *EnrollCmd) Execute(ctx context.Context, r *EnrollRequest) (*EnrollResponse, error) { + const p = "/api/ingest_manager/fleet/agents/enroll" + const key = "Authorization" + const prefix = "ApiKey " + + if err := r.Validate(); err != nil { + return nil, err + } + + headers := map[string][]string{ + key: []string{prefix + r.EnrollAPIKey}, + } + + b, err := json.Marshal(r) + if err != nil { + return nil, errors.New(err, "fail to encode the enrollment request") + } + + resp, err := e.client.Send(ctx, "POST", p, nil, headers, bytes.NewBuffer(b)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, extract(resp.Body) + } + + enrollResponse := &EnrollResponse{} + decoder := json.NewDecoder(resp.Body) + if err := decoder.Decode(enrollResponse); err != nil { + return nil, errors.New(err, "fail to decode enrollment response") + } + + if err := enrollResponse.Validate(); err != nil { + return nil, err + } + + return enrollResponse, nil +} + +// NewEnrollCmd creates a new EnrollCmd. +func NewEnrollCmd(client clienter) *EnrollCmd { + return &EnrollCmd{client: client} +} diff --git a/x-pack/agent/pkg/fleetapi/enroll_cmd_test.go b/x-pack/agent/pkg/fleetapi/enroll_cmd_test.go new file mode 100644 index 00000000000..ef46d8c89e0 --- /dev/null +++ b/x-pack/agent/pkg/fleetapi/enroll_cmd_test.go @@ -0,0 +1,134 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleetapi + +import ( + "context" + "encoding/json" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/agent/pkg/kibana" +) + +func TestEnroll(t *testing.T) { + t.Run("Successful enroll", withServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + + // Assert Enrollment Token. + require.Equal(t, "ApiKey my-enrollment-api-key", r.Header.Get("Authorization")) + + decoder := json.NewDecoder(r.Body) + defer r.Body.Close() + + req := &EnrollRequest{} + err := decoder.Decode(req) + require.NoError(t, err) + + require.Equal(t, PermanentEnroll, req.Type) + require.Equal(t, "im-a-beat", req.SharedID) + require.Equal(t, Metadata{ + Local: map[string]interface{}{"os": "linux"}, + UserProvided: make(map[string]interface{}), + }, req.Metadata) + + response := &EnrollResponse{ + Action: "created", + Success: true, + Item: EnrollItemResponse{ + ID: "a4937110-e53e-11e9-934f-47a8e38a522c", + Active: true, + PolicyID: "default", + Type: PermanentEnroll, + EnrolledAt: time.Now(), + UserProvidedMetadata: make(map[string]interface{}), + LocalMetadata: make(map[string]interface{}), + AccessAPIKey: "my-access-api-key", + }, + } + + b, err := json.Marshal(response) + require.NoError(t, err) + + w.Write(b) + }) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := kibana.NewWithRawConfig(nil, cfg, nil) + require.NoError(t, err) + + req := &EnrollRequest{ + Type: PermanentEnroll, + EnrollAPIKey: "my-enrollment-api-key", + SharedID: "im-a-beat", + Metadata: Metadata{ + Local: map[string]interface{}{ + "os": "linux", + }, + UserProvided: make(map[string]interface{}), + }, + } + + cmd := &EnrollCmd{client: client} + resp, err := cmd.Execute(context.Background(), req) + require.NoError(t, err) + + require.Equal(t, "my-access-api-key", resp.Item.AccessAPIKey) + require.Equal(t, "created", resp.Action) + require.True(t, resp.Success) + }, + )) + + t.Run("Raise back any server errors", withServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/api/ingest_manager/fleet/agents/enroll", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"statusCode": 500, "error":"Something is really bad here"}`)) + }) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := kibana.NewWithRawConfig(nil, cfg, nil) + require.NoError(t, err) + + req := &EnrollRequest{ + Type: PermanentEnroll, + EnrollAPIKey: "my-enrollment-api-key", + SharedID: "im-a-beat", + Metadata: Metadata{ + Local: map[string]interface{}{ + "os": "linux", + }, + UserProvided: make(map[string]interface{}), + }, + } + + cmd := &EnrollCmd{client: client} + _, err = cmd.Execute(context.Background(), req) + require.Error(t, err) + + require.True(t, strings.Index(err.Error(), "500") > 0) + require.True(t, strings.Index(err.Error(), "Something is really bad here") > 0) + }, + )) +} diff --git a/x-pack/agent/pkg/fleetapi/helper_test.go b/x-pack/agent/pkg/fleetapi/helper_test.go new file mode 
index 00000000000..3c5c5bbd4b8
--- /dev/null
+++ b/x-pack/agent/pkg/fleetapi/helper_test.go
@@ -0,0 +1,64 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fleetapi
+
+import (
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/kibana"
+)
+
+func authHandler(handler http.HandlerFunc, apiKey string) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		const key = "Authorization"
+		const prefix = "ApiKey "
+
+		v := strings.TrimPrefix(r.Header.Get(key), prefix)
+		if v != apiKey {
+			http.Error(w, "Unauthorized", http.StatusUnauthorized)
+			return
+		}
+		handler(w, r)
+	}
+}
+
+func withServer(m func(t *testing.T) *http.ServeMux, test func(t *testing.T, host string)) func(t *testing.T) {
+	return func(t *testing.T) {
+		listener, err := net.Listen("tcp", ":0")
+		require.NoError(t, err)
+		defer listener.Close()
+
+		port := listener.Addr().(*net.TCPAddr).Port
+
+		go http.Serve(listener, m(t))
+
+		test(t, "localhost:"+strconv.Itoa(port))
+	}
+}
+
+func withServerWithAuthClient(
+	m func(t *testing.T) *http.ServeMux,
+	apiKey string,
+	test func(t *testing.T, client clienter),
+) func(t *testing.T) {
+
+	return withServer(m, func(t *testing.T, host string) {
+		log, _ := logger.New()
+		cfg := &kibana.Config{
+			Host: host,
+		}
+
+		client, err := NewAuthWithConfig(log, apiKey, cfg)
+		require.NoError(t, err)
+		test(t, client)
+	})
+}
diff --git a/x-pack/agent/pkg/fleetapi/round_trippers.go b/x-pack/agent/pkg/fleetapi/round_trippers.go
new file mode 100644
index 00000000000..de46b42426e
--- /dev/null
+++ b/x-pack/agent/pkg/fleetapi/round_trippers.go
@@ -0,0 +1,72 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fleetapi
+
+import (
+	"errors"
+	"net/http"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/kibana"
+)
+
+// ErrInvalidAPIKey is returned when authentication with Fleet fails.
+var ErrInvalidAPIKey = errors.New("invalid api key to authenticate with fleet")
+
+// FleetUserAgentRoundTripper adds the Fleet user agent.
+type FleetUserAgentRoundTripper struct {
+	rt      http.RoundTripper
+	version string
+}
+
+// RoundTrip delegates to the wrapped round tripper; the Fleet user agent string
+// itself is set on every request by the underlying UserAgentRoundTripper.
+func (r *FleetUserAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	return r.rt.RoundTrip(req)
+}
+
+// NewFleetUserAgentRoundTripper returns a FleetUserAgentRoundTripper that wraps the
+// given round tripper in a UserAgentRoundTripper with a Fleet-specific string.
+func NewFleetUserAgentRoundTripper(wrapped http.RoundTripper, version string) http.RoundTripper {
+	const name = "Beat Agent"
+	return &FleetUserAgentRoundTripper{
+		rt: kibana.NewUserAgentRoundTripper(wrapped, name+" v"+version),
+	}
+}
+
+// FleetAuthRoundTripper allows all calls to be authenticated using the API key.
+// The key is added as an Authorization header.
+type FleetAuthRoundTripper struct {
+	rt     http.RoundTripper
+	apiKey string
+}
+
+// RoundTrip authenticates all the calls to the service.
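+//
+// Illustrative wiring (a sketch; the wrapped transport and key are placeholders):
+//
+//	rt, err := NewFleetAuthRoundTripper(http.DefaultTransport, "my-api-key")
+//	if err != nil {
+//		// an empty API key was passed in
+//	}
+//	httpClient := &http.Client{Transport: rt}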
+func (r *FleetAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	const key = "Authorization"
+	const prefix = "ApiKey "
+
+	req.Header.Set(key, prefix+r.apiKey)
+	resp, err := r.rt.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized {
+		defer resp.Body.Close()
+		return nil, ErrInvalidAPIKey
+	}
+
+	return resp, err
+}
+
+// NewFleetAuthRoundTripper wraps an existing http.RoundTripper and adds the API key
+// to the Authorization header.
+func NewFleetAuthRoundTripper(
+	wrapped http.RoundTripper,
+	apiKey string,
+) (http.RoundTripper, error) {
+	if len(apiKey) == 0 {
+		return nil, errors.New("empty api key received")
+	}
+	return &FleetAuthRoundTripper{rt: wrapped, apiKey: apiKey}, nil
+}
diff --git a/x-pack/agent/pkg/id/generate.go b/x-pack/agent/pkg/id/generate.go
new file mode 100644
index 00000000000..92b786aa59b
--- /dev/null
+++ b/x-pack/agent/pkg/id/generate.go
@@ -0,0 +1,34 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package id
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/oklog/ulid"
+)
+
+// ID represents a unique ID.
+type ID = ulid.ULID
+
+// rand.Rand is not thread safe, so we keep a pool of them to speed up ID generation.
+var randPool = sync.Pool{
+	New: func() interface{} {
+		t := time.Now()
+		return rand.New(rand.NewSource(t.UnixNano()))
+	},
+}
+
+// Generate returns an ID, or an error if we cannot generate one.
+func Generate() (ID, error) {
+	r := randPool.Get().(*rand.Rand)
+	defer randPool.Put(r)
+
+	t := time.Now()
+	entropy := ulid.Monotonic(r, 0)
+	return ulid.New(ulid.Timestamp(t), entropy)
+}
diff --git a/x-pack/agent/pkg/id/generate_test.go b/x-pack/agent/pkg/id/generate_test.go
new file mode 100644
index 00000000000..8abef51830c
--- /dev/null
+++ b/x-pack/agent/pkg/id/generate_test.go
@@ -0,0 +1,17 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package id
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestGenerate(t *testing.T) {
+	id, err := Generate()
+	require.NoError(t, err)
+	require.NotNil(t, id)
+}
diff --git a/x-pack/agent/pkg/kibana/client.go b/x-pack/agent/pkg/kibana/client.go
new file mode 100644
index 00000000000..e8d588edb5b
--- /dev/null
+++ b/x-pack/agent/pkg/kibana/client.go
@@ -0,0 +1,220 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
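+
+// Illustrative construction and use of the client defined below (a sketch, not
+// part of this change; the host value is a placeholder and ctx is assumed to be
+// an existing context):
+//
+//	cfg := config.MustNewConfigFrom(map[string]interface{}{"host": "localhost:5601"})
+//	client, err := NewWithRawConfig(nil, cfg, nil)
+//	if err == nil {
+//		resp, _ := client.Send(ctx, "GET", "/api/status", nil, nil, nil)
+//		defer resp.Body.Close()
+//	}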
+
+package kibana
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/libbeat/common"
+	"github.com/elastic/beats/v7/libbeat/common/transport"
+	"github.com/elastic/beats/v7/libbeat/common/transport/tlscommon"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+const kibanaPort = 5601
+
+type requestFunc func(string, string, url.Values, io.Reader) (*http.Request, error)
+type wrapperFunc func(rt http.RoundTripper) (http.RoundTripper, error)
+
+type clienter interface {
+	Send(
+		method string,
+		path string,
+		params url.Values,
+		headers http.Header,
+		body io.Reader,
+	) (*http.Response, error)
+	Close() error
+}
+
+// Client wraps an http.Client and takes care of making the raw calls to Kibana. The client
+// should stay simple, and specifics should be implemented in external actions instead of
+// adding new methods to the client. For authenticated calls, or to send specific fields on
+// every request, create custom RoundTripper implementations that take care of the boilerplate.
+type Client struct {
+	log     *logger.Logger
+	request requestFunc
+	client  http.Client
+	config  *Config
+}
+
+// New creates a new Kibana API client.
+func New(
+	log *logger.Logger,
+	factory requestFunc,
+	cfg *Config,
+	httpClient http.Client,
+) (*Client, error) {
+	c := &Client{
+		log:     log,
+		request: factory,
+		client:  httpClient,
+		config:  cfg,
+	}
+	return c, nil
+}
+
+// NewConfigFromURL returns a Kibana Config based on a received host.
+func NewConfigFromURL(kURL string) (*Config, error) {
+	u, err := url.Parse(kURL)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not parse Kibana url")
+	}
+
+	var username, password string
+	if u.User != nil {
+		username = u.User.Username()
+		// The second return value is true when the password is set.
+		password, _ = u.User.Password()
+	}
+
+	c := defaultClientConfig()
+	c.Protocol = Protocol(u.Scheme)
+	c.Host = u.Host
+	c.Path = u.Path
+	c.Username = username
+	c.Password = password
+
+	return &c, nil
+}
+
+// NewWithRawConfig returns a new Kibana client with a specified configuration.
+func NewWithRawConfig(log *logger.Logger, config *config.Config, wrapper wrapperFunc) (*Client, error) {
+	l := log
+	if l == nil {
+		log, err := logger.New()
+		if err != nil {
+			return nil, err
+		}
+		l = log
+	}
+
+	cfg := &Config{}
+	if err := config.Unpack(cfg); err != nil {
+		return nil, errors.Wrap(err, "invalid configuration")
+	}
+
+	return NewWithConfig(l, cfg, wrapper)
+}
+
+// NewWithConfig takes a Kibana Config and returns a client.
+func NewWithConfig(log *logger.Logger, cfg *Config, wrapper wrapperFunc) (*Client, error) {
+	var transport http.RoundTripper
+	transport, err := makeTransport(cfg.Timeout, cfg.TLS)
+	if err != nil {
+		return nil, err
+	}
+
+	if cfg.IsBasicAuth() {
+		// Pass basic auth credentials to all the underlying calls.
+		transport = NewBasicAuthRoundTripper(transport, cfg.Username, cfg.Password)
+	}
+
+	if wrapper != nil {
+		transport, err = wrapper(transport)
+		if err != nil {
+			return nil, errors.Wrap(err, "fail to create transport client")
+		}
+	}
+
+	httpClient := http.Client{
+		Transport: transport,
+		Timeout:   cfg.Timeout,
+	}
+
+	// Normalize the URL with the configured path and space ID.
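+	// For example (illustrative values): a path of "kibana" with a space ID of
+	// "prod" yields the prefix "kibana/prod/", so every request is made under
+	// that base path.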
+	var p string
+	if len(cfg.SpaceID) > 0 {
+		p = strings.Join([]string{cfg.Path, cfg.SpaceID}, "/")
+	} else {
+		p = cfg.Path
+	}
+
+	if !strings.HasSuffix(p, "/") {
+		p = p + "/"
+	}
+
+	kibanaURL, err := common.MakeURL(string(cfg.Protocol), p, cfg.Host, kibanaPort)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid Kibana endpoint")
+	}
+
+	return New(log, prefixRequestFactory(kibanaURL), cfg, httpClient)
+}
+
+// Send executes a direct call against the Kibana API. The method takes care of adding the
+// headers Kibana requires, like "Content-Type", "Accept", and "kbn-xsrf". No assumption is
+// made about the format of the response; it is the responsibility of the caller to
+// correctly unpack any received data.
+//
+// NOTE:
+// - The caller of this method is free to override any values found in the headers.
+// - Unpacking Kibana errors is not done in the Send method; a helper method is provided.
+func (c *Client) Send(
+	ctx context.Context,
+	method, path string,
+	params url.Values,
+	headers http.Header,
+	body io.Reader,
+) (*http.Response, error) {
+	c.log.Debugf("Request method: %s, path: %s", method, path)
+
+	req, err := c.request(method, path, params, body)
+	if err != nil {
+		return nil, errors.Wrapf(err, "fail to create HTTP request using method %s to %s", method, path)
+	}
+
+	// Add general headers to the request; we are dealing exclusively with JSON.
+	// Content-Type / Accept can be overridden by the caller.
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Add("Accept", "application/json")
+	req.Header.Set("kbn-xsrf", "1") // Without this Kibana will refuse to answer the request.
+
+	// Copy headers.
+	for header, values := range headers {
+		for _, v := range values {
+			req.Header.Add(header, v)
+		}
+	}
+
+	return c.client.Do(req.WithContext(ctx))
+}
+
+// URI returns the remote URI.
+func (c *Client) URI() string {
+	return string(c.config.Protocol) + "://" + c.config.Host + "/" + c.config.Path
+}
+
+func prefixRequestFactory(URL string) requestFunc {
+	return func(method, path string, params url.Values, body io.Reader) (*http.Request, error) {
+		path = strings.TrimPrefix(path, "/")
+		newPath := strings.Join([]string{URL, path, "?", params.Encode()}, "")
+		return http.NewRequest(method, newPath, body)
+	}
+}
+
+// makeTransport creates a transport object based on the TLS configuration.
+func makeTransport(timeout time.Duration, tls *tlscommon.Config) (*http.Transport, error) {
+	tlsConfig, err := tlscommon.LoadTLSConfig(tls)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid TLS configuration")
+	}
+	dialer := transport.NetDialer(timeout)
+	tlsDialer, err := transport.TLSDialer(dialer, tlsConfig, timeout)
+	if err != nil {
+		return nil, errors.Wrap(err, "fail to create TLS dialer")
+	}
+
+	// TODO: Dial is deprecated; we need to move to DialContext.
+	return &http.Transport{Dial: dialer.Dial, DialTLS: tlsDialer.Dial}, nil
+}
diff --git a/x-pack/agent/pkg/kibana/client_test.go b/x-pack/agent/pkg/kibana/client_test.go
new file mode 100644
index 00000000000..51d29f29813
--- /dev/null
+++ b/x-pack/agent/pkg/kibana/client_test.go
@@ -0,0 +1,326 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package kibana
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strconv"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/config"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+func noopWrapper(rt http.RoundTripper) (http.RoundTripper, error) {
+	return rt, nil
+}
+
+func addCatchAll(mux *http.ServeMux, t *testing.T) *http.ServeMux {
+	mux.HandleFunc("/", func(_ http.ResponseWriter, _ *http.Request) {
+		t.Fatal("HTTP catch all handler called")
+	})
+	return mux
+}
+
+func TestHTTPClient(t *testing.T) {
+	ctx := context.Background()
+	l, err := logger.New()
+	require.NoError(t, err)
+
+	t.Run("Guard against double slashes on path", withServer(
+		func(t *testing.T) *http.ServeMux {
+			msg := `{ message: "hello" }`
+			mux := http.NewServeMux()
+			mux.HandleFunc("/nested/echo-hello", func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusOK)
+				fmt.Fprintf(w, msg)
+			})
+			return addCatchAll(mux, t)
+		}, func(t *testing.T, host string) {
+			// Add a slash at the end of the URL; internally we should prevent having
+			// double slashes when adding the path to the request.
+			url := "http://" + host + "/"
+
+			c, err := NewConfigFromURL(url)
+			require.NoError(t, err)
+
+			client, err := NewWithConfig(l, c, noopWrapper)
+			require.NoError(t, err)
+
+			resp, err := client.Send(ctx, "GET", "/nested/echo-hello", nil, nil, nil)
+			require.NoError(t, err)
+
+			body, err := ioutil.ReadAll(resp.Body)
+			require.NoError(t, err)
+			defer resp.Body.Close()
+			assert.Equal(t, `{ message: "hello" }`, string(body))
+		},
+	))
+
+	t.Run("Simple call", withServer(
+		func(t *testing.T) *http.ServeMux {
+			msg := `{ message: "hello" }`
+			mux := http.NewServeMux()
+			mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusOK)
+				fmt.Fprintf(w, msg)
+			})
+			return mux
+		}, func(t *testing.T, host string) {
+			cfg := config.MustNewConfigFrom(map[string]interface{}{
+				"host": host,
+			})
+
+			client, err := NewWithRawConfig(nil, cfg, nil)
+			require.NoError(t, err)
+			resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil)
+			require.NoError(t, err)
+
+			body, err := ioutil.ReadAll(resp.Body)
+			require.NoError(t, err)
+			defer resp.Body.Close()
+			assert.Equal(t, `{ message: "hello" }`, string(body))
+		},
+	))
+
+	t.Run("Simple call with a prefix path", withServer(
+		func(t *testing.T) *http.ServeMux {
+			msg := `{ message: "hello" }`
+			mux := http.NewServeMux()
+			mux.HandleFunc("/mycustompath/echo-hello", func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusOK)
+				fmt.Fprintf(w, msg)
+			})
+			return mux
+		}, func(t *testing.T, host string) {
+			cfg := config.MustNewConfigFrom(map[string]interface{}{
+				"host": host,
+				"path": "mycustompath",
+			})
+
+			client, err := NewWithRawConfig(nil, cfg, nil)
+			require.NoError(t, err)
+			resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil)
+			require.NoError(t, err)
+
+			body, err := ioutil.ReadAll(resp.Body)
+			require.NoError(t, err)
+			defer resp.Body.Close()
+			assert.Equal(t, `{ message: "hello" }`, string(body))
+		},
+	))
+
+	t.Run("Basic auth when credentials are valid", withServer(
+		func(t *testing.T) *http.ServeMux {
+			msg := `{ message: "hello" }`
+			mux := http.NewServeMux()
+			mux.HandleFunc("/echo-hello", basicAuthHandler(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusOK)
+				fmt.Fprintf(w, msg)
+			}, "hello", "world", "testing"))
+
return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "username": "hello", + "password": "world", + "host": host, + }) + + client, err := NewWithRawConfig(nil, cfg, nil) + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ message: "hello" }`, string(body)) + }, + )) + + t.Run("Basic auth when credentials are invalid", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", basicAuthHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + }, "hello", "world", "testing")) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "username": "bye", + "password": "world", + "host": host, + }) + + client, err := NewWithRawConfig(nil, cfg, nil) + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + }, + )) + + t.Run("Custom user agent", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + require.Equal(t, r.Header.Get("User-Agent"), "custom-agent") + }) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewUserAgentRoundTripper(wrapped, "custom-agent"), nil + }) + + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ message: "hello" }`, string(body)) + }, + )) + + t.Run("Enforce Kibana version", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ message: "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", enforceKibanaHandler(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + }, "8.0.0")) + return mux + }, func(t *testing.T, host string) { + cfg := config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewEnforceKibanaVersionRoundTripper(wrapped, "8.0.0"), nil + }) + + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ message: "hello" }`, string(body)) + }, + )) + + t.Run("Allows to debug HTTP request between a client and a server", withServer( + func(t *testing.T) *http.ServeMux { + msg := `{ "message": "hello" }` + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, msg) + }) + return mux + }, func(t *testing.T, host string) { + + debugger := &debugStack{} + + cfg := 
config.MustNewConfigFrom(map[string]interface{}{ + "host": host, + }) + + client, err := NewWithRawConfig(nil, cfg, func(wrapped http.RoundTripper) (http.RoundTripper, error) { + return NewDebugRoundTripper(wrapped, debugger), nil + }) + + require.NoError(t, err) + resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, bytes.NewBuffer([]byte("hello"))) + require.NoError(t, err) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, `{ "message": "hello" }`, string(body)) + + for _, m := range debugger.messages { + fmt.Println(m) + } + + assert.Equal(t, 1, len(debugger.messages)) + }, + )) +} + +func withServer(m func(t *testing.T) *http.ServeMux, test func(t *testing.T, host string)) func(t *testing.T) { + return func(t *testing.T) { + listener, err := net.Listen("tcp", ":0") + require.NoError(t, err) + defer listener.Close() + + port := listener.Addr().(*net.TCPAddr).Port + + go http.Serve(listener, m(t)) + + test(t, "localhost:"+strconv.Itoa(port)) + } +} + +func basicAuthHandler(handler http.HandlerFunc, username, password, realm string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok || u != username || p != password { + w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + handler(w, r) + } +} + +func enforceKibanaHandler(handler http.HandlerFunc, version string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("kbn-version") != version { + http.Error(w, "Bad Request", http.StatusBadRequest) + return + } + handler(w, r) + } +} + +type debugStack struct { + sync.Mutex + messages []string +} + +func (d *debugStack) Debug(args ...interface{}) { + d.Lock() + defer d.Unlock() + + // This should not happen in testing. + m, ok := args[0].(string) + if !ok { + panic("could not convert message to string ") + } + + d.messages = append(d.messages, m) +} diff --git a/x-pack/agent/pkg/kibana/config.go b/x-pack/agent/pkg/kibana/config.go new file mode 100644 index 00000000000..34975d933d9 --- /dev/null +++ b/x-pack/agent/pkg/kibana/config.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "fmt" + "time" + + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" +) + +// Config is the configuration for the Kibana client. +type Config struct { + Protocol Protocol `config:"protocol" yaml:"protocol"` + SpaceID string `config:"space.id" yaml:"space.id,omitempty"` + Username string `config:"username" yaml:"username,omitempty"` + Password string `config:"password" yaml:"password,omitempty"` + Path string `config:"path" yaml:"path,omitempty"` + Host string `config:"host" yaml:"host,omitempty"` + Timeout time.Duration `config:"timeout" yaml:"timeout,omitempty"` + TLS *tlscommon.Config `config:"ssl" yaml:"ssl,omitempty"` +} + +// Protocol define the protocol to use to make the connection. (Either HTTPS or HTTP) +type Protocol string + +// Unpack the protocol. 
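+//
+// For example (illustrative): a config value of "https" is accepted and sets the
+// protocol, while a value such as "ftp" is rejected with an error.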
+func (p *Protocol) Unpack(from string) error {
+	if from != "https" && from != "http" {
+		return fmt.Errorf("invalid protocol %s, accepted values are 'http' and 'https'", from)
+	}
+
+	// Record the validated value; without this assignment the protocol from
+	// the configuration would never be applied.
+	*p = Protocol(from)
+	return nil
+}
+
+func defaultClientConfig() Config {
+	return Config{
+		Protocol: Protocol("http"),
+		Host:     "localhost:5601",
+		Path:     "",
+		SpaceID:  "",
+		Username: "",
+		Password: "",
+		Timeout:  90 * time.Second,
+		TLS:      nil,
+	}
+}
+
+// IsBasicAuth returns true if the username and password are both defined.
+func (c *Config) IsBasicAuth() bool {
+	return len(c.Username) > 0 && len(c.Password) > 0
+}
diff --git a/x-pack/agent/pkg/kibana/round_trippers.go b/x-pack/agent/pkg/kibana/round_trippers.go
new file mode 100644
index 00000000000..4adbee0e476
--- /dev/null
+++ b/x-pack/agent/pkg/kibana/round_trippers.go
@@ -0,0 +1,188 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package kibana
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// UserAgentRoundTripper adds a User-Agent string on every request.
+type UserAgentRoundTripper struct {
+	rt        http.RoundTripper
+	userAgent string
+}
+
+// RoundTrip adds a User-Agent string on every request if it's not already present.
+func (r *UserAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	const userAgentHeader = "User-Agent"
+	if len(req.Header.Get(userAgentHeader)) == 0 {
+		req.Header.Set(userAgentHeader, r.userAgent)
+	}
+
+	return r.rt.RoundTrip(req)
+}
+
+// NewUserAgentRoundTripper returns a new UserAgentRoundTripper.
+func NewUserAgentRoundTripper(wrapped http.RoundTripper, userAgent string) http.RoundTripper {
+	return &UserAgentRoundTripper{rt: wrapped, userAgent: userAgent}
+}
+
+// DebugRoundTripper is a debugging RoundTripper that can be inserted in the chain of existing
+// http.RoundTrippers. It will output the request and response information for each call to the
+// specified logger at debug level. This is most useful in development or when debugging any
+// calls between the agent and the Fleet API.
+type DebugRoundTripper struct {
+	rt  http.RoundTripper
+	log debugLogger
+}
+
+type debugLogger interface {
+	Debug(args ...interface{})
+}
+
+// RoundTrip sends the raw request and raw response from the client to the logger at debug level.
+// This should not be used in a production environment because it will leak credentials.
+func (r *DebugRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	// Note: we could use httputil.DumpResponse here, but we want to pretty print
+	// the response body when we receive a JSON response.
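+	// The dump handed to the logger looks roughly like this (illustrative):
+	//
+	//   Request:
+	//     Verb: GET
+	//     URI: /api/status
+	//     Headers:
+	//       key: Accept values: {application/json}
+	//   Response:
+	//     Headers:
+	//       key: Content-Type values: {application/json}
+	//     Response code: 200
+	//   Request executed in 3ms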
+	var b strings.Builder
+
+	b.WriteString("Request:\n")
+	b.WriteString("  Verb: " + req.Method + "\n")
+	b.WriteString("  URI: " + req.URL.RequestURI() + "\n")
+	b.WriteString("  Headers:\n")
+
+	for k, v := range req.Header {
+		b.WriteString("    key: " + k + " values: {" + strings.Join(v, ", ") + "}\n")
+	}
+
+	if req.Body != nil {
+		dataReq, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			return nil, errors.Wrap(err, "fail to read the body of the request")
+		}
+		req.Body.Close()
+
+		req.Body = ioutil.NopCloser(bytes.NewBuffer(dataReq))
+
+		b.WriteString("Request Body:\n")
+		b.WriteString(string(prettyBody(dataReq)) + "\n")
+	}
+
+	startTime := time.Now()
+	resp, err := r.rt.RoundTrip(req)
+	duration := time.Since(startTime)
+
+	// Guard against a failed round trip; in that case resp is nil and must not
+	// be dereferenced below.
+	if err != nil {
+		r.log.Debug(b.String())
+		return resp, err
+	}
+
+	b.WriteString("Response:\n")
+	b.WriteString("  Headers:\n")
+
+	for k, v := range resp.Header {
+		b.WriteString("    key: " + k + " values: {" + strings.Join(v, ", ") + "}\n")
+	}
+
+	b.WriteString(fmt.Sprintf("  Response code: %d\n", resp.StatusCode))
+	b.WriteString(fmt.Sprintf("Request executed in %dms\n", duration.Nanoseconds()/int64(time.Millisecond)))
+
+	// If the body is empty we just return.
+	if resp.Body == nil {
+		r.log.Debug(b.String())
+		return resp, err
+	}
+
+	// Hijack the body and output it in the log; this is only for debugging and development.
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return resp, errors.Wrap(err, "fail to read the body of the response")
+	}
+	resp.Body.Close()
+
+	b.WriteString("Response Body:\n")
+	b.WriteString(string(prettyBody(data)) + "\n")
+
+	resp.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+
+	r.log.Debug(b.String())
+
+	return resp, err
+}
+
+// NewDebugRoundTripper wraps an existing http.RoundTripper into a DebugRoundTripper that will log
+// the call executed to the service.
+func NewDebugRoundTripper(wrapped http.RoundTripper, log debugLogger) http.RoundTripper {
+	return &DebugRoundTripper{rt: wrapped, log: log}
+}
+
+// EnforceKibanaVersionRoundTripper sets the kbn-version header on every request.
+type EnforceKibanaVersionRoundTripper struct {
+	rt      http.RoundTripper
+	version string
+}
+
+// RoundTrip adds the kbn-version header; if the remote Kibana is not of an equal or
+// superior version, the call will fail.
+func (r *EnforceKibanaVersionRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	const key = "kbn-version"
+	req.Header.Set(key, r.version)
+	return r.rt.RoundTrip(req)
+}
+
+// NewEnforceKibanaVersionRoundTripper enforces the remote endpoint to be of a certain
+// version; if the remote Kibana is not of an equal or superior version, the call will fail.
+func NewEnforceKibanaVersionRoundTripper(wrapped http.RoundTripper, version string) http.RoundTripper {
+	return &EnforceKibanaVersionRoundTripper{rt: wrapped, version: version}
+}
+
+// BasicAuthRoundTripper wraps every request with basic auth.
+type BasicAuthRoundTripper struct {
+	rt       http.RoundTripper
+	username string
+	password string
+}
+
+// RoundTrip adds the username and password to every request sent to the remote service.
+func (r *BasicAuthRoundTripper) RoundTrip(
+	req *http.Request,
+) (*http.Response, error) {
+	// If authorization is already set on the request, we do not force our username and password.
+	const key = "Authorization"
+
+	if len(req.Header.Get(key)) > 0 {
+		return r.rt.RoundTrip(req)
+	}
+
+	req.SetBasicAuth(r.username, r.password)
+	return r.rt.RoundTrip(req)
+}
+
+// NewBasicAuthRoundTripper returns a Basic Auth round tripper.
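+//
+// Illustrative composition with the other round trippers in this file (a sketch;
+// the credentials and agent string are placeholders):
+//
+//	var rt http.RoundTripper = http.DefaultTransport
+//	rt = NewBasicAuthRoundTripper(rt, "elastic", "changeme")
+//	rt = NewUserAgentRoundTripper(rt, "my-agent")
+//	client := &http.Client{Transport: rt}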
+func NewBasicAuthRoundTripper(
+	wrapped http.RoundTripper,
+	username, password string,
+) http.RoundTripper {
+	return &BasicAuthRoundTripper{rt: wrapped, username: username, password: password}
+}
+
+func prettyBody(data []byte) []byte {
+	var pretty bytes.Buffer
+
+	if err := json.Indent(&pretty, data, "", " "); err != nil {
+		// Indent doesn't validate the JSON when it parses it; we assume that if
+		// the buffer is empty we failed to indent anything, and we just return
+		// the raw bytes.
+		if pretty.Len() == 0 {
+			return data
+		}
+	}
+
+	return pretty.Bytes()
+}
diff --git a/x-pack/agent/pkg/packer/packer.go b/x-pack/agent/pkg/packer/packer.go
new file mode 100644
index 00000000000..7d62e6a024f
--- /dev/null
+++ b/x-pack/agent/pkg/packer/packer.go
@@ -0,0 +1,106 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package packer
+
+import (
+	"bytes"
+	"compress/zlib"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+)
+
+// PackMap represents multiple packed files: the key is the path of the file and
+// the value is the raw bytes of the file.
+type PackMap map[string][]byte
+
+// Pack takes one or more file patterns, reads all matching files, and compresses and
+// encodes the data. It returns the encoded string, the list of encoded files, and any error.
+func Pack(patterns ...string) (string, []string, error) {
+	var encodedFiles []string
+
+	pack := make(PackMap)
+	for _, p := range patterns {
+		files, err := filepath.Glob(p)
+		if err != nil {
+			return "", []string{}, errors.New(err, fmt.Sprintf("error while reading pattern %s", p))
+		}
+		for _, f := range files {
+			b, err := ioutil.ReadFile(f)
+			if err != nil {
+				return "", []string{}, errors.New(err, fmt.Sprintf("cannot read file %s", f))
+			}
+
+			_, ok := pack[f]
+			if ok {
+				return "", []string{}, errors.New(fmt.Sprintf("file %s already packed", f))
+			}
+
+			encodedFiles = append(encodedFiles, f)
+			pack[f] = b
+		}
+	}
+
+	if len(pack) == 0 {
+		return "", []string{}, fmt.Errorf("no files found with provided patterns: %s", strings.Join(patterns, ", "))
+	}
+
+	var buf bytes.Buffer
+	w := zlib.NewWriter(&buf)
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(pack); err != nil {
+		return "", []string{}, errors.New(err, "could not encode files")
+	}
+	// Flush any buffers.
+	w.Close()
+
+	return base64.StdEncoding.EncodeToString(buf.Bytes()), encodedFiles, nil
+}
+
+// Unpack takes a pack and returns an uncompressed map of the raw bytes.
+func Unpack(pack string) (PackMap, error) {
+	d, err := base64.StdEncoding.DecodeString(pack)
+	if err != nil {
+		return nil, errors.New(err, "error while decoding")
+	}
+
+	b := bytes.NewReader(d)
+	r, err := zlib.NewReader(b)
+	if err != nil {
+		return nil, errors.New(err, "error while uncompressing")
+	}
+	defer r.Close()
+
+	var uncompressed PackMap
+	dec := json.NewDecoder(r)
+	if err := dec.Decode(&uncompressed); err != nil {
+		return nil, errors.New(err, "could not read the pack data")
+	}
+
+	return uncompressed, nil
+}
+
+// MustUnpack unpacks the pack and panics on error.
+func MustUnpack(pack string) PackMap {
+	v, err := Unpack(pack)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// MustPackFile packs all the files matching the patterns and panics on any error.
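+//
+// Illustrative round trip with Unpack (a sketch; the pattern is a placeholder):
+//
+//	packed, files := MustPackFile("_meta/*.yml")
+//	contents := MustUnpack(packed) // PackMap of path -> raw bytes
+//	_ = files                      // the list of files that were packed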
+func MustPackFile(patterns ...string) (string, []string) { + v, files, err := Pack(patterns...) + if err != nil { + panic(err) + } + return v, files +} diff --git a/x-pack/agent/pkg/packer/packer_test.go b/x-pack/agent/pkg/packer/packer_test.go new file mode 100644 index 00000000000..253aa8d5e7d --- /dev/null +++ b/x-pack/agent/pkg/packer/packer_test.go @@ -0,0 +1,128 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package packer + +import ( + "crypto/rand" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPacker(t *testing.T) { + type tt struct { + content map[string]string + patterns []string + failed bool + } + + withFiles := func(test tt, fn func(pattern []string, t *testing.T)) func(t *testing.T) { + return func(t *testing.T) { + d, err := ioutil.TempDir("", "packer") + require.NoError(t, err) + defer os.RemoveAll(d) + + for f, v := range test.content { + path := filepath.Join(d, f) + err := ioutil.WriteFile(path, []byte(v), 0666) + require.NoError(t, err) + } + + patterns := make([]string, len(test.patterns)) + for i, p := range test.patterns { + patterns[i] = filepath.Join(d, p) + } + + fn(patterns, t) + } + } + + normalize := func(m PackMap) map[string]string { + newM := make(map[string]string, len(m)) + for k, v := range m { + newM[filepath.Base(k)] = string(v) + } + return newM + } + + testcases := map[string]tt{ + "single files": { + content: map[string]string{ + "abc.txt": "hello world", + }, + patterns: []string{"abc.txt"}, + }, + "multiples files": { + content: map[string]string{ + "abc.txt": "hello world", + "abc2.txt": "another content", + }, + patterns: []string{"abc.txt", "abc2.txt"}, + }, + "multiples files with wildcards": { + content: map[string]string{ + "abc.txt": "hello world", + "abc2.txt": "another \n\rcontent", + }, + patterns: []string{"abc*"}, + }, + "duplicate files": { + content: map[string]string{ + "abc.txt": "hello world", + }, + patterns: []string{"abc.txt", "abc.txt"}, + failed: true, + }, + "large file": { + content: map[string]string{ + "abc.txt": mustRandStr(1024 * 1014 * 2), + }, + patterns: []string{"abc.txt"}, + }, + } + + for name, test := range testcases { + t.Run(name, withFiles(test, func(patterns []string, t *testing.T) { + packed, files, err := Pack(patterns...) + if test.failed { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, test.failed, err != nil) + + uncompressed, err := Unpack(packed) + assert.NoError(t, err) + + norm := normalize(uncompressed) + assert.Equal(t, len(norm), len(files)) + assert.True(t, reflect.DeepEqual(test.content, norm)) + })) + } +} + +func randStr(length int) (string, error) { + r := make([]byte, length) + _, err := rand.Read(r) + + if err != nil { + return "", err + } + + return string(r), nil +} + +func mustRandStr(l int) string { + s, err := randStr(l) + if err != nil { + panic(err) + } + return s +} diff --git a/x-pack/agent/pkg/release/release_dev.go b/x-pack/agent/pkg/release/release_dev.go new file mode 100644 index 00000000000..a8f9db58db1 --- /dev/null +++ b/x-pack/agent/pkg/release/release_dev.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build dev
+
+package release
+
+import "os"
+
+func init() {
+	envVersion, ok := os.LookupEnv("BEATS_VERSION")
+	if ok {
+		version = envVersion
+	}
+}
diff --git a/x-pack/agent/pkg/release/version.go b/x-pack/agent/pkg/release/version.go
new file mode 100644
index 00000000000..d0e45df3c6c
--- /dev/null
+++ b/x-pack/agent/pkg/release/version.go
@@ -0,0 +1,41 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package release
+
+import "time"
+
+// version is the current version of the agent.
+var version = "8.0.0"
+
+// commit is the hash of the current build.
+var commit = ""
+
+// buildTime is when the binary was built.
+var buildTime = ""
+
+// qualifier is the version qualifier, like alpha1.
+var qualifier = ""
+
+// Commit returns the current build hash, or unknown if it was not injected in the build process.
+func Commit() string {
+	return commit
+}
+
+// BuildTime returns the build time of the binaries.
+func BuildTime() time.Time {
+	t, err := time.Parse(time.RFC3339, buildTime)
+	if err != nil {
+		return time.Time{}
+	}
+	return t
+}
+
+// Version returns the version of the application.
+func Version() string {
+	if qualifier == "" {
+		return version
+	}
+	return version + "-" + qualifier
+}
diff --git a/x-pack/agent/pkg/release/version_test.go b/x-pack/agent/pkg/release/version_test.go
new file mode 100644
index 00000000000..279644c6750
--- /dev/null
+++ b/x-pack/agent/pkg/release/version_test.go
@@ -0,0 +1,42 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package release
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestVersion(t *testing.T) {
+	t.Run("set version without qualifier", func(t *testing.T) {
+		old := version
+		defer func() { version = old }()
+		version = "8.x.x"
+		assert.Equal(t, Version(), version)
+	})
+
+	t.Run("set version with qualifier", func(t *testing.T) {
+		old := version
+		defer func() { version = old }()
+		version = "8.x.x"
+		qualifier = "alpha1"
+		assert.Equal(t, Version(), version+"-"+qualifier)
+	})
+
+	t.Run("get commit hash", func(t *testing.T) {
+		commit = "abc1234"
+		assert.Equal(t, Commit(), commit)
+	})
+
+	t.Run("get build time", func(t *testing.T) {
+		ts := time.Now().Format(time.RFC3339)
+		old := buildTime
+		defer func() { buildTime = old }()
+		buildTime = ts
+		assert.Equal(t, ts, BuildTime().Format(time.RFC3339))
+	})
+}
diff --git a/x-pack/agent/pkg/reporter/backend.go b/x-pack/agent/pkg/reporter/backend.go
new file mode 100644
index 00000000000..39ee2bcda5b
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/backend.go
@@ -0,0 +1,13 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package reporter
+
+import "context"
+
+// Backend defines an actual implementation of reporting.
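+//
+// A minimal implementation might look like the following (an illustrative sketch
+// only, not part of this change):
+//
+//	type stdoutBackend struct{}
+//
+//	func (stdoutBackend) Report(_ context.Context, e Event) error {
+//		fmt.Println(e.Message())
+//		return nil
+//	}
+//
+//	func (stdoutBackend) Close() error { return nil }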
+type Backend interface {
+	Report(context.Context, Event) error
+	Close() error
+}
diff --git a/x-pack/agent/pkg/reporter/event.go b/x-pack/agent/pkg/reporter/event.go
new file mode 100644
index 00000000000..dff0c1a89a2
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/event.go
@@ -0,0 +1,30 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package reporter
+
+import "time"
+
+// Event is a reported event.
+type Event interface {
+	Type() string
+	SubType() string
+	Time() time.Time
+	Message() string
+	Payload() map[string]interface{}
+}
+
+type event struct {
+	eventype  string
+	subType   string
+	timestamp time.Time
+	message   string
+	payload   map[string]interface{}
+}
+
+func (e event) Type() string                    { return e.eventype }
+func (e event) SubType() string                 { return e.subType }
+func (e event) Time() time.Time                 { return e.timestamp }
+func (e event) Message() string                 { return e.message }
+func (e event) Payload() map[string]interface{} { return e.payload }
diff --git a/x-pack/agent/pkg/reporter/fleet/config.go b/x-pack/agent/pkg/reporter/fleet/config.go
new file mode 100644
index 00000000000..c1538f626b4
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/fleet/config.go
@@ -0,0 +1,19 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fleet
+
+// ManagementConfig is the configuration describing the fleet-connected parts.
+type ManagementConfig struct {
+	Threshold               int `yaml:"threshold" config:"threshold" validate:"min=1"`
+	ReportingCheckFrequency int `yaml:"check_frequency_sec" config:"check_frequency_sec" validate:"min=1"`
+}
+
+// DefaultFleetManagementConfig initializes ManagementConfig with default values.
+func DefaultFleetManagementConfig() *ManagementConfig {
+	return &ManagementConfig{
+		Threshold:               10000,
+		ReportingCheckFrequency: 30,
+	}
+}
diff --git a/x-pack/agent/pkg/reporter/fleet/reporter.go b/x-pack/agent/pkg/reporter/fleet/reporter.go
new file mode 100644
index 00000000000..7f8799b93c3
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/fleet/reporter.go
@@ -0,0 +1,181 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fleet
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/reporter"
+)
+
+const (
+	defaultThreshold = 1000
+	agentIDKey       = "elastic.agent.id"
+)
+
+type ackFn func()
+
+type event struct {
+	AgentID   string                 `json:"agent_id"`
+	EventType string                 `json:"type"`
+	Ts        fleetapi.Time          `json:"timestamp"`
+	SubType   string                 `json:"subtype"`
+	Msg       string                 `json:"message"`
+	Payload   map[string]interface{} `json:"payload,omitempty"`
+}
+
+func (e *event) Type() string {
+	return e.EventType
+}
+
+func (e *event) Timestamp() time.Time {
+	return time.Time(e.Ts)
+}
+
+func (e *event) Message() string {
+	return e.Msg
+}
+
+// Reporter is a fleet reporter: it queues reported events and makes them available to
+// the caller, dropping the oldest or least important events once the configured
+// threshold is reached.
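+//
+// Typical flow (illustrative): events are enqueued with Report, drained with Events,
+// and removed from the queue once the returned ack function is called:
+//
+//	_ = r.Report(ctx, ev)
+//	events, ack := r.Events()
+//	// ... ship the events to fleet ...
+//	ack()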
+type Reporter struct {
+	info      agentInfo
+	logger    *logger.Logger
+	queue     []fleetapi.SerializableEvent
+	qlock     sync.Mutex
+	threshold int
+	lastAck   time.Time
+}
+
+type agentInfo interface {
+	AgentID() string
+}
+
+// NewReporter creates a new fleet reporter.
+func NewReporter(agentInfo agentInfo, l *logger.Logger, c *ManagementConfig) (*Reporter, error) {
+	r := &Reporter{
+		info:      agentInfo,
+		queue:     make([]fleetapi.SerializableEvent, 0),
+		logger:    l,
+		threshold: c.Threshold,
+	}
+
+	return r, nil
+}
+
+// Report enqueues an event into the reporter queue.
+func (r *Reporter) Report(ctx context.Context, e reporter.Event) error {
+	r.qlock.Lock()
+	defer r.qlock.Unlock()
+
+	r.queue = append(r.queue, &event{
+		AgentID:   r.info.AgentID(),
+		EventType: e.Type(),
+		Ts:        fleetapi.Time(e.Time()),
+		SubType:   e.SubType(),
+		Msg:       e.Message(),
+		Payload:   e.Payload(),
+	})
+
+	if r.threshold > 0 && len(r.queue) > r.threshold {
+		// Drop a low-importance event if needed.
+		r.dropEvent()
+	}
+
+	return nil
+}
+
+// Events returns a list of events from the queue and an ack function
+// which clears those events once the caller is done with processing.
+func (r *Reporter) Events() ([]fleetapi.SerializableEvent, func()) {
+	r.qlock.Lock()
+	defer r.qlock.Unlock()
+
+	cp := r.queueCopy()
+
+	ackFn := func() {
+		// As time is monotonic and this runs on a single machine, this should be OK.
+		r.clear(cp, time.Now())
+	}
+
+	return cp, ackFn
+}
+
+func (r *Reporter) clear(items []fleetapi.SerializableEvent, ackTime time.Time) {
+	r.qlock.Lock()
+	defer r.qlock.Unlock()
+
+	if ackTime.Sub(r.lastAck) <= 0 ||
+		len(r.queue) == 0 ||
+		len(items) == 0 {
+		return
+	}
+
+	var dropIdx int
+	r.lastAck = ackTime
+	itemsLen := len(items)
+
+OUTER:
+	for idx := itemsLen - 1; idx >= 0; idx-- {
+		for i, v := range r.queue {
+			if v == items[idx] {
+				dropIdx = i
+				break OUTER
+			}
+		}
+	}
+
+	r.queue = r.queue[dropIdx+1:]
+}
+
+// Close stops all the background jobs the reporter is running; it is currently a no-op.
+func (r *Reporter) Close() error {
+	return nil
+}
+
+func (r *Reporter) queueCopy() []fleetapi.SerializableEvent {
+	size := len(r.queue)
+	batch := make([]fleetapi.SerializableEvent, size)
+
+	copy(batch, r.queue)
+	return batch
+}
+
+func (r *Reporter) dropEvent() {
+	if dropped := r.tryDropInfo(); !dropped {
+		r.dropFirst()
+	}
+}
+
+// tryDropInfo returns true if an info event was found and dropped.
+func (r *Reporter) tryDropInfo() bool {
+	for i, e := range r.queue {
+		if e.Type() != reporter.EventTypeError {
+			r.queue = append(r.queue[:i], r.queue[i+1:]...)
+			r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, e)
+			return true
+		}
+	}
+
+	return false
+}
+
+func (r *Reporter) dropFirst() {
+	if len(r.queue) == 0 {
+		return
+	}
+
+	first := r.queue[0]
+	r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, first)
+	r.queue = r.queue[1:]
+}
+
+// Check it is reporter.Backend.
+var _ reporter.Backend = &Reporter{}
diff --git a/x-pack/agent/pkg/reporter/fleet/reporter_test.go b/x-pack/agent/pkg/reporter/fleet/reporter_test.go
new file mode 100644
index 00000000000..61ed46c1169
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/fleet/reporter_test.go
@@ -0,0 +1,241 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package fleet + +import ( + "context" + "testing" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/agent/pkg/fleetapi" + "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter" +) + +func TestEventsHaveAgentID(t *testing.T) { + // setup client + threshold := 10 + r := newTestReporter(1*time.Second, threshold) + + // report events + firstBatchSize := 5 + ee := getEvents(firstBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check after delay for output + reportedEvents, _ := r.Events() + if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { + t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) + } + + for _, e := range reportedEvents { + re, ok := e.(*event) + + if !ok { + t.Fatal("reported event is not an event") + } + + if re.AgentID != "agentID" { + t.Fatalf("reported event id incorrect, expected: 'agentID', got: '%v'", re.AgentID) + } + } + +} + +func TestReporting(t *testing.T) { + // setup client + threshold := 10 + r := newTestReporter(1*time.Second, threshold) + + // report events + firstBatchSize := 5 + ee := getEvents(firstBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check after delay for output + reportedEvents, ack := r.Events() + if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { + t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) + } + + // reset reported events + ack() + + // report events > threshold + secondBatchSize := threshold + 1 + ee = getEvents(secondBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check events are dropped + reportedEvents, _ = r.Events() + if reportedCount := len(reportedEvents); reportedCount != threshold { + t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) + } +} + +func TestInfoDrop(t *testing.T) { + // setup client + threshold := 2 + r := newTestReporter(2*time.Second, threshold) + + // report 1 info and 1 error + ee := []reporter.Event{testStateEvent{}, testErrorEvent{}, testErrorEvent{}} + + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check after delay for output + reportedEvents, _ := r.Events() + if reportedCount := len(reportedEvents); reportedCount != 2 { + t.Fatalf("expected %v events got %v", 2, reportedCount) + } + + // check both are errors + if reportedEvents[0].Type() != reportedEvents[1].Type() || reportedEvents[0].Type() != reporter.EventTypeError { + t.Fatalf("expected ERROR events got [1]: '%v', [2]: '%v'", reportedEvents[0].Type(), reportedEvents[1].Type()) + } +} + +func TestOutOfOrderAck(t *testing.T) { + // setup client + threshold := 100 + r := newTestReporter(1*time.Second, threshold) + + // report events + firstBatchSize := 5 + ee := getEvents(firstBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check after delay for output + reportedEvents1, ack1 := r.Events() + if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { + t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) + } + + // report events > threshold + secondBatchSize := threshold + 1 + ee = getEvents(secondBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check all events are returned + reportedEvents2, ack2 := r.Events() + if reportedCount := len(reportedEvents2); reportedCount == firstBatchSize+secondBatchSize { + t.Fatalf("expected %v events got %v", secondBatchSize, 
reportedCount) + } + + // ack second batch + ack2() + + reportedEvents, _ := r.Events() + if reportedCount := len(reportedEvents); reportedCount != 0 { + t.Fatalf("expected all events are removed after second batch ack, got %v events", reportedCount) + } + + defer func() { + r := recover() + if r != nil { + t.Fatalf("expected ack is ignored but it paniced: %v", r) + } + }() + + ack1() + reportedEvents, _ = r.Events() + if reportedCount := len(reportedEvents); reportedCount != 0 { + t.Fatalf("expected all events are still removed after first batch ack, got %v events", reportedCount) + } +} + +func TestAfterDrop(t *testing.T) { + // setup client + threshold := 7 + r := newTestReporter(1*time.Second, threshold) + + // report events + firstBatchSize := 5 + ee := getEvents(firstBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check after delay for output + reportedEvents1, ack1 := r.Events() + if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { + t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) + } + + // report events > threshold + secondBatchSize := 5 + ee = getEvents(secondBatchSize) + for _, e := range ee { + r.Report(context.Background(), e) + } + + // check all events are returned + reportedEvents2, _ := r.Events() + if reportedCount := len(reportedEvents2); reportedCount != threshold { + t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) + } + + // remove first batch from queue + ack1() + + reportedEvents, _ := r.Events() + if reportedCount := len(reportedEvents); reportedCount != secondBatchSize { + t.Fatalf("expected all events from first batch are removed, got %v events", reportedCount) + } + +} + +func getEvents(count int) []reporter.Event { + ee := make([]reporter.Event, 0, count) + for i := 0; i < count; i++ { + ee = append(ee, testStateEvent{}) + } + + return ee +} + +func newTestReporter(frequency time.Duration, threshold int) *Reporter { + log, _ := logger.New() + r := &Reporter{ + info: &testInfo{}, + queue: make([]fleetapi.SerializableEvent, 0), + logger: log, + threshold: threshold, + } + + return r +} + +type testInfo struct{} + +func (*testInfo) AgentID() string { return "agentID" } + +type testStateEvent struct{} + +func (testStateEvent) Type() string { return reporter.EventTypeState } +func (testStateEvent) SubType() string { return reporter.EventSubTypeInProgress } +func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } +func (testStateEvent) Message() string { return "hello" } +func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } + +type testErrorEvent struct{} + +func (testErrorEvent) Type() string { return reporter.EventTypeError } +func (testErrorEvent) SubType() string { return "PATH" } +func (testErrorEvent) Time() time.Time { return time.Unix(0, 1) } +func (testErrorEvent) Message() string { return "hello" } +func (testErrorEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } diff --git a/x-pack/agent/pkg/reporter/log/config.go b/x-pack/agent/pkg/reporter/log/config.go new file mode 100644 index 00000000000..ef69afe9bbe --- /dev/null +++ b/x-pack/agent/pkg/reporter/log/config.go @@ -0,0 +1,17 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package log
+
+// Config is a configuration describing the log reporter behavior.
+type Config struct {
+	Format Format `config:"format" yaml:"format"`
+}
+
+// DefaultLogConfig initializes Config with default values.
+func DefaultLogConfig() *Config {
+	return &Config{
+		Format: DefaultFormat,
+	}
+}
diff --git a/x-pack/agent/pkg/reporter/log/format.go b/x-pack/agent/pkg/reporter/log/format.go
new file mode 100644
index 00000000000..dcb08f5b1c4
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/log/format.go
@@ -0,0 +1,59 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package log
+
+import (
+	"fmt"
+	"time"
+)
+
+// Format is the format used for logging: one of [DefaultFormat, JSONFormat].
+type Format bool
+
+const (
+	// DefaultFormat is a log format, resulting in: "2006-01-02T15:04:05Z: type: 'STATE': sub_type: 'STARTING' message: Application 'filebeat' is starting."
+	DefaultFormat Format = true
+	// JSONFormat is a log format, resulting in: {"Type": "STATE", "SubType": "STARTING", "Time": "2006-01-02T15:04:05Z", "Message": "Application 'filebeat' is starting."}
+	JSONFormat Format = false
+)
+
+const (
+	// e.g. "2006-01-02T15:04:05Z: type: 'STATE': sub_type: 'STARTING' message: Application 'filebeat' is starting."
+	defaultLogFormat = "%s: type: '%s': sub_type: '%s' message: %s"
+	timeFormat       = time.RFC3339
+)
+
+var formatMap = map[string]Format{
+	"default": DefaultFormat,
+	"json":    JSONFormat,
+}
+
+var reverseMap = map[bool]string{
+	true:  "default",
+	false: "json",
+}
+
+// Unpack enables the use of string values in the config.
+func (m *Format) Unpack(v string) error {
+	mgt, ok := formatMap[v]
+	if !ok {
+		return fmt.Errorf(
+			"unknown format, received '%s' and valid values are default or json",
+			v,
+		)
+	}
+	*m = mgt
+	return nil
+}
+
+// MarshalYAML marshals into a string.
+func (m Format) MarshalYAML() (interface{}, error) {
+	s, ok := reverseMap[bool(m)]
+	if !ok {
+		return nil, fmt.Errorf("cannot marshal value of %+v", m)
+	}
+
+	return s, nil
+}
diff --git a/x-pack/agent/pkg/reporter/log/reporter.go b/x-pack/agent/pkg/reporter/log/reporter.go
new file mode 100644
index 00000000000..04fc28c394b
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/log/reporter.go
@@ -0,0 +1,91 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package log
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/reporter"
+)
+
+type logger interface {
+	Error(...interface{})
+	Info(...interface{})
+}
+
+// Reporter is a reporter that writes every received event to the configured logger,
+// using either the default or the JSON format.
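+//
+// Depending on the configured Format, a single event is rendered either as
+// (illustrative):
+//
+//	2006-01-02T15:04:05Z: type: 'STATE': sub_type: 'STARTING' message: hello
+//
+// or, with JSONFormat, as:
+//
+//	{"Type":"STATE","SubType":"STARTING","Time":"2006-01-02T15:04:05Z","Message":"hello"}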
+type Reporter struct {
+	logger     logger
+	formatFunc func(record reporter.Event) string
+}
+
+// NewReporter creates a new log reporter using the configured format.
+func NewReporter(l logger, cfg *Config) *Reporter {
+	format := DefaultFormat
+	if cfg != nil {
+		format = cfg.Format
+	}
+
+	formatFunc := defaultFormatFunc
+	if format == JSONFormat {
+		formatFunc = jsonFormatFunc
+	}
+
+	return &Reporter{
+		logger:     l,
+		formatFunc: formatFunc,
+	}
+}
+
+// Report formats the event and logs it, using the error level for error
+// events and the info level for everything else.
+func (r *Reporter) Report(ctx context.Context, record reporter.Event) error {
+	if record.Type() == reporter.EventTypeError {
+		r.logger.Error(r.formatFunc(record))
+		return nil
+	}
+
+	r.logger.Info(r.formatFunc(record))
+	return nil
+}
+
+// Close stops any background jobs the reporter is running.
+func (r *Reporter) Close() error { return nil }
+
+func defaultFormatFunc(e reporter.Event) string {
+	return fmt.Sprintf(defaultLogFormat,
+		e.Time().Format(timeFormat),
+		e.Type(),
+		e.SubType(),
+		e.Message(),
+	)
+}
+
+func jsonFormatFunc(record reporter.Event) string {
+	b, _ := json.Marshal(makeEventReportable(record))
+	return string(b)
+}
+
+type reportableEvent struct {
+	Type    string
+	SubType string
+	Time    string
+	Message string
+	Payload map[string]interface{} `json:"payload,omitempty"`
+}
+
+func makeEventReportable(event reporter.Event) reportableEvent {
+	return reportableEvent{
+		Type:    event.Type(),
+		SubType: event.SubType(),
+		Time:    event.Time().Format(timeFormat),
+		Message: event.Message(),
+		Payload: event.Payload(),
+	}
+}
+
+// Check it is reporter.Backend
+var _ reporter.Backend = &Reporter{}
diff --git a/x-pack/agent/pkg/reporter/log/reporter_test.go b/x-pack/agent/pkg/reporter/log/reporter_test.go
new file mode 100644
index 00000000000..a551809f0f7
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/log/reporter_test.go
@@ -0,0 +1,108 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
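A minimal usage sketch of the log reporter above, assuming the import paths introduced by this patch; stdoutLogger and startEvent are hypothetical helpers that satisfy the reporter's logger and reporter.Event interfaces.

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/elastic/beats/v7/x-pack/agent/pkg/reporter"
    	logrep "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter/log"
    )

    // stdoutLogger satisfies the reporter's minimal logger interface.
    type stdoutLogger struct{}

    func (stdoutLogger) Error(args ...interface{}) { fmt.Println(args...) }
    func (stdoutLogger) Info(args ...interface{})  { fmt.Println(args...) }

    // startEvent is a hypothetical event implementing reporter.Event.
    type startEvent struct{}

    func (startEvent) Type() string                    { return reporter.EventTypeState }
    func (startEvent) SubType() string                 { return reporter.EventSubTypeStarting }
    func (startEvent) Time() time.Time                 { return time.Now() }
    func (startEvent) Message() string                 { return "Application 'filebeat' is starting." }
    func (startEvent) Payload() map[string]interface{} { return nil }

    func main() {
    	cfg := logrep.DefaultLogConfig()
    	cfg.Format = logrep.JSONFormat

    	rep := logrep.NewReporter(stdoutLogger{}, cfg)
    	defer rep.Close()

    	// Emits one JSON document per event, e.g.
    	// {"Type":"STATE","SubType":"STARTING","Time":"...","Message":"Application 'filebeat' is starting."}
    	rep.Report(context.Background(), startEvent{})
    }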
+ +package log + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter" +) + +type testCase struct { + event reporter.Event + format Format + expectedInfo string + expectedError string +} + +func TestReport(t *testing.T) { + infoEvent := generateEvent(reporter.EventTypeState, reporter.EventSubTypeStarting) + errorEvent := generateEvent(reporter.EventTypeError, reporter.EventSubTypeConfig) + + testCases := []testCase{ + testCase{infoEvent, DefaultFormat, DefaultString(infoEvent), ""}, + testCase{infoEvent, JSONFormat, JSONString(infoEvent), ""}, + testCase{errorEvent, DefaultFormat, "", DefaultString(errorEvent)}, + testCase{errorEvent, JSONFormat, "", JSONString(errorEvent)}, + } + + for _, tc := range testCases { + cfg := DefaultLogConfig() + cfg.Format = tc.format + + log := newTestLogger() + rep := NewReporter(log, cfg) + + rep.Report(context.Background(), tc.event) + + if got := log.info(); tc.expectedInfo != got { + t.Errorf("[%s.%s(%v)] expected info '%s' got '%s'", tc.event.Type(), tc.event.SubType(), tc.format, tc.expectedInfo, got) + } + + if got := log.error(); tc.expectedError != got { + t.Errorf("[%s.%s(%v)] expected error '%s' got '%s'", tc.event.Type(), tc.event.SubType(), tc.format, tc.expectedError, got) + } + } +} + +type testLogger struct { + errorLog string + infoLog string +} + +func newTestLogger() *testLogger { + t := &testLogger{} + return t +} + +func (t *testLogger) Error(args ...interface{}) { + t.errorLog = fmt.Sprint(args...) +} + +func (t *testLogger) Info(args ...interface{}) { + t.infoLog = fmt.Sprint(args...) +} + +func (t *testLogger) error() string { + return t.errorLog +} + +func (t *testLogger) info() string { + return t.infoLog +} + +func generateEvent(eventype, subType string) testEvent { + return testEvent{ + eventtype: eventype, + subType: subType, + timestamp: time.Unix(0, 1), + message: "message", + } +} + +type testEvent struct { + eventtype string + subType string + timestamp time.Time + message string +} + +func (t testEvent) Type() string { return t.eventtype } +func (t testEvent) SubType() string { return t.subType } +func (t testEvent) Time() time.Time { return t.timestamp } +func (t testEvent) Message() string { return t.message } +func (testEvent) Payload() map[string]interface{} { return map[string]interface{}{} } + +func JSONString(event testEvent) string { + timestamp := event.timestamp.Format(timeFormat) + return fmt.Sprintf(`{"Type":"%s","SubType":"%s","Time":"%s","Message":"message"}`, event.Type(), event.SubType(), timestamp) +} +func DefaultString(event testEvent) string { + timestamp := event.timestamp.Format(timeFormat) + return fmt.Sprintf("%s: type: '%s': sub_type: '%s' message: message", timestamp, event.Type(), event.SubType()) +} diff --git a/x-pack/agent/pkg/reporter/noop/reporter.go b/x-pack/agent/pkg/reporter/noop/reporter.go new file mode 100644 index 00000000000..6412c9ff162 --- /dev/null +++ b/x-pack/agent/pkg/reporter/noop/reporter.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package noop + +import ( + "context" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/reporter" +) + +// Reporter is a reporter without any effects, serves just as a showcase for further implementations. 
+type Reporter struct{}
+
+// NewReporter creates a new noop reporter
+func NewReporter() *Reporter {
+	return &Reporter{}
+}
+
+// Report in noop reporter does nothing
+func (*Reporter) Report(_ context.Context, _ reporter.Event) error { return nil }
+
+// Close stops all the background jobs the reporter is running.
+func (*Reporter) Close() error { return nil }
+
+// Check it is reporter.Backend
+var _ reporter.Backend = &Reporter{}
diff --git a/x-pack/agent/pkg/reporter/reporter.go b/x-pack/agent/pkg/reporter/reporter.go
new file mode 100644
index 00000000000..74bea56a760
--- /dev/null
+++ b/x-pack/agent/pkg/reporter/reporter.go
@@ -0,0 +1,130 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package reporter
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/agent/errors"
+	"github.com/elastic/beats/v7/x-pack/agent/pkg/core/logger"
+)
+
+const (
+	// EventTypeState is a record type describing an application state change
+	EventTypeState = "STATE"
+	// EventTypeError is a record type describing an application error
+	EventTypeError = "ERROR"
+	// EventTypeActionResult is a record type describing the result of an application action
+	EventTypeActionResult = "ACTION_RESULT"
+
+	// EventSubTypeStarting is an event type indicating the application is starting
+	EventSubTypeStarting = "STARTING"
+	// EventSubTypeInProgress is an event type indicating the application is in progress
+	EventSubTypeInProgress = "IN_PROGRESS"
+	// EventSubTypeConfig is an event type indicating an application config related event.
+	EventSubTypeConfig = "CONFIG"
+	// EventSubTypeStopping is an event type indicating the application is stopping
+	EventSubTypeStopping = "STOPPING"
+	// EventSubTypeStopped is an event type indicating the application is stopped
+	EventSubTypeStopped = "STOPPED"
+)
+
+type agentInfo interface {
+	AgentID() string
+}
+
+// Reporter uses multiple backends, which need to be non-blocking,
+// to report various events.
+type Reporter struct {
+	info     agentInfo
+	backends []Backend
+
+	l *logger.Logger
+}
+
+// NewReporter creates a new reporter with the provided set of Backends.
+func NewReporter(ctx context.Context, logger *logger.Logger, info agentInfo, backends ...Backend) *Reporter {
+	return &Reporter{
+		info:     info,
+		backends: backends,
+		l:        logger,
+	}
+}
+
+// Close stops the reporter. For further reporting, a new reporter needs to be created.
+func (r *Reporter) Close() {
+	for _, c := range r.backends {
+		c.Close()
+	}
+}
+
+// OnStarting reports an application starting event.
+func (r *Reporter) OnStarting(ctx context.Context, application string) {
+	msg := fmt.Sprintf("Application: %s[%s]: State change: STARTING", application, r.info.AgentID())
+	rec := generateRecord(EventTypeState, EventSubTypeStarting, msg)
+	r.report(ctx, rec)
+}
+
+// OnRunning reports an application running event.
+func (r *Reporter) OnRunning(ctx context.Context, application string) {
+	msg := fmt.Sprintf("Application: %s[%s]: State change: IN_PROGRESS", application, r.info.AgentID())
+	rec := generateRecord(EventTypeState, EventSubTypeInProgress, msg)
+	r.report(ctx, rec)
+}
+
+// OnFailing reports an application failure event.
+func (r *Reporter) OnFailing(ctx context.Context, application string, err error) { + msg := fmt.Sprintf("Application: %s[%s]: %v", application, r.info.AgentID(), err) + rec := generateRecord(EventTypeError, EventSubTypeConfig, msg) + r.report(ctx, rec) +} + +// OnStopping reports application stopped event. +func (r *Reporter) OnStopping(ctx context.Context, application string) { + msg := fmt.Sprintf("Application: %s[%s]: State change: STOPPING", application, r.info.AgentID()) + rec := generateRecord(EventTypeState, EventSubTypeStopping, msg) + r.report(ctx, rec) +} + +// OnStopped reports application stopped event. +func (r *Reporter) OnStopped(ctx context.Context, application string) { + msg := fmt.Sprintf("Application: %s[%s]: State change: STOPPED", application, r.info.AgentID()) + rec := generateRecord(EventTypeState, EventSubTypeStopped, msg) + r.report(ctx, rec) +} + +// OnFatal reports applications fatal event. +func (r *Reporter) OnFatal(ctx context.Context, application string, err error) { + msg := fmt.Sprintf("Application: %s[%s]: %v", application, r.info.AgentID(), err) + rec := generateRecord(EventTypeError, EventSubTypeConfig, msg) + r.report(ctx, rec) +} + +func (r *Reporter) report(ctx context.Context, e event) { + var err error + + for _, b := range r.backends { + if er := b.Report(ctx, e); er != nil { + err = multierror.Append(err, er) + } + } + + if err != nil { + r.l.Error(errors.New(err, "failed reporting event")) + } +} + +func generateRecord(eventype, subType, message string) event { + return event{ + eventype: eventype, + subType: subType, + timestamp: time.Now(), + message: message, + } +} diff --git a/x-pack/agent/pkg/reporter/reporter_test.go b/x-pack/agent/pkg/reporter/reporter_test.go new file mode 100644 index 00000000000..5f95824279c --- /dev/null +++ b/x-pack/agent/pkg/reporter/reporter_test.go @@ -0,0 +1,90 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
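The report helper above fans an event out to every backend and aggregates failures with hashicorp/go-multierror instead of stopping at the first error. A standalone sketch of that pattern, with hypothetical backend and failing types:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"

    	"github.com/hashicorp/go-multierror"
    )

    type backend interface {
    	Report(ctx context.Context, msg string) error
    }

    // failing is a hypothetical backend that always errors.
    type failing struct{ name string }

    func (f failing) Report(ctx context.Context, msg string) error {
    	return fmt.Errorf("%s: cannot report %q", f.name, msg)
    }

    // reportAll mirrors Reporter.report: every backend is attempted, and the
    // failures are collected into a single aggregated error.
    func reportAll(ctx context.Context, backends []backend, msg string) error {
    	var err error
    	for _, b := range backends {
    		if er := b.Report(ctx, msg); er != nil {
    			err = multierror.Append(err, er)
    		}
    	}
    	return err
    }

    func main() {
    	err := reportAll(context.Background(), []backend{failing{"log"}, failing{"fleet"}}, "STARTING")

    	var merr *multierror.Error
    	if errors.As(err, &merr) {
    		fmt.Println(len(merr.Errors), "backends failed") // 2 backends failed
    	}
    }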
+ +package reporter + +import ( + "context" + "errors" + "testing" +) + +var result Event + +type testReporter struct{} + +func (t *testReporter) Close() error { return nil } +func (t *testReporter) Report(_ context.Context, r Event) error { + result = r + return nil +} + +type info struct{} + +func (*info) AgentID() string { return "id" } + +func TestTypes(t *testing.T) { + rep := NewReporter(context.Background(), nil, &info{}, &testReporter{}) + // test starting + rep.OnStarting(context.Background(), "a1") + if r := result.Type(); r != EventTypeState { + t.Errorf("OnStarting: expected record type '%v', got '%v'", EventTypeState, r) + } + + if r := result.SubType(); r != EventSubTypeStarting { + t.Errorf("OnStarting: expected event type '%v', got '%v'", EventSubTypeStarting, r) + } + + // test in progress + rep.OnRunning(context.Background(), "a2") + if r := result.Type(); r != EventTypeState { + t.Errorf("OnRunning: expected record type '%v', got '%v'", EventTypeState, r) + } + + if r := result.SubType(); r != EventSubTypeInProgress { + t.Errorf("OnRunning: expected event type '%v', got '%v'", EventSubTypeStarting, r) + } + + // test stopping + rep.OnStopping(context.Background(), "a3") + if r := result.Type(); r != EventTypeState { + t.Errorf("OnStopping: expected record type '%v', got '%v'", EventTypeState, r) + } + + if r := result.SubType(); r != EventSubTypeStopping { + t.Errorf("OnStopping: expected event type '%v', got '%v'", EventSubTypeStarting, r) + } + + // test stopped + rep.OnStopped(context.Background(), "a4") + if r := result.Type(); r != EventTypeState { + t.Errorf("OnStopped: expected record type '%v', got '%v'", EventTypeState, r) + } + + if r := result.SubType(); r != EventSubTypeStopped { + t.Errorf("OnStopped: expected event type '%v', got '%v'", EventSubTypeStarting, r) + } + + // test failing + err := errors.New("e1") + rep.OnFailing(context.Background(), "a5", err) + if r := result.Type(); r != EventTypeError { + t.Errorf("OnFailing: expected record type '%v', got '%v'", EventTypeState, r) + } + + if r := result.SubType(); r != EventSubTypeConfig { + t.Errorf("OnFailing: expected event type '%v', got '%v'", EventSubTypeStarting, r) + } + + // test fatal + err = errors.New("e2") + rep.OnFatal(context.Background(), "a6", err) + if r := result.Type(); r != EventTypeError { + t.Errorf("OnFatal: expected record type '%v', got '%v'", EventTypeState, r) + } + + if r := result.SubType(); r != EventSubTypeConfig { + t.Errorf("OnFatal: expected event type '%v', got '%v'", EventSubTypeStarting, r) + } +} diff --git a/x-pack/agent/pkg/scheduler/scheduler.go b/x-pack/agent/pkg/scheduler/scheduler.go new file mode 100644 index 00000000000..c4c9b9d55eb --- /dev/null +++ b/x-pack/agent/pkg/scheduler/scheduler.go @@ -0,0 +1,134 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package scheduler + +import ( + "math/rand" + "time" +) + +// Scheduler simple interface that encapsulate the scheduling logic, this is useful if you want to +// test asynchronous code in a synchronous way. +type Scheduler interface { + WaitTick() <-chan time.Time + Stop() +} + +// Stepper is a scheduler where each Tick is manually triggered, this is useful in scenario +// when you want to test the behavior of asynchronous code in a synchronous way. 
+type Stepper struct {
+	C chan time.Time
+}
+
+// Next manually triggers the next tick, unblocking WaitTick.
+func (s *Stepper) Next() {
+	s.C <- time.Now()
+}
+
+// WaitTick returns a channel to watch for ticks.
+func (s *Stepper) WaitTick() <-chan time.Time {
+	return s.C
+}
+
+// Stop stops the scheduler; for the Stepper scheduler nothing needs to be done.
+func (s *Stepper) Stop() {}
+
+// NewStepper returns a new Stepper scheduler where the tick is manually controlled.
+func NewStepper() *Stepper {
+	return &Stepper{
+		C: make(chan time.Time),
+	}
+}
+
+// Periodic wraps a time.Ticker as the scheduler.
+type Periodic struct {
+	Ticker *time.Ticker
+	ran    bool
+}
+
+// NewPeriodic returns a Periodic scheduler that will unblock the WaitTick based on a duration.
+// The ticker will do an initial tick, sleep for the defined period, and tick again.
+func NewPeriodic(d time.Duration) *Periodic {
+	return &Periodic{Ticker: time.NewTicker(d)}
+}
+
+// WaitTick waits for the duration to expire to unblock the channel.
+// Note: you should not keep a reference to the channel.
+func (p *Periodic) WaitTick() <-chan time.Time {
+	if p.ran {
+		return p.Ticker.C
+	}
+
+	rC := make(chan time.Time, 1)
+	rC <- time.Now()
+	p.ran = true
+
+	return rC
+}
+
+// Stop stops the internal Ticker.
+// Note: this will not close the internal channel; it is up to the caller to unblock
+// any waiting goroutine using another mechanism.
+func (p *Periodic) Stop() {
+	p.Ticker.Stop()
+}
+
+// PeriodicJitter is a scheduler that ticks periodically; to better distribute the load
+// on the network and the remote endpoint, it introduces a random variance into each
+// sleep.
+type PeriodicJitter struct {
+	C        chan time.Time
+	ran      bool
+	d        time.Duration
+	variance time.Duration
+	done     chan struct{}
+}
+
+// NewPeriodicJitter creates a new PeriodicJitter.
+func NewPeriodicJitter(d, variance time.Duration) *PeriodicJitter {
+	return &PeriodicJitter{
+		C:        make(chan time.Time, 1),
+		d:        d,
+		variance: variance,
+		done:     make(chan struct{}),
+	}
+}
+
+// WaitTick waits on the duration plus some jitter to unblock the channel.
+// Note: you should not keep a reference to the channel.
+func (p *PeriodicJitter) WaitTick() <-chan time.Time {
+	if !p.ran {
+		// Sleep for only the variance; this will smooth the initial bootstrap of all the agents.
+		select {
+		case <-time.After(p.delay()):
+			p.C <- time.Now()
+		case <-p.done:
+			p.C <- time.Now()
+			close(p.C)
+		}
+		p.ran = true
+		return p.C
+	}
+
+	select {
+	case <-time.After(p.d + p.delay()):
+		p.C <- time.Now()
+	case <-p.done:
+		p.C <- time.Now()
+		close(p.C)
+	}
+
+	return p.C
+}
+
+// Stop stops the PeriodicJitter scheduler.
+func (p *PeriodicJitter) Stop() {
+	close(p.done)
+}
+
+func (p *PeriodicJitter) delay() time.Duration {
+	t := int64(p.variance)
+	return time.Duration(rand.Int63n(t))
+}
diff --git a/x-pack/agent/pkg/scheduler/scheduler_test.go b/x-pack/agent/pkg/scheduler/scheduler_test.go
new file mode 100644
index 00000000000..66c7e1e1d3c
--- /dev/null
+++ b/x-pack/agent/pkg/scheduler/scheduler_test.go
@@ -0,0 +1,193 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
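A short sketch of why the Stepper exists: code written against the Scheduler interface can be driven deterministically in tests, with each Next() unblocking exactly one WaitTick. Assumes the scheduler package path introduced by this patch.

    package main

    import (
    	"fmt"

    	"github.com/elastic/beats/v7/x-pack/agent/pkg/scheduler"
    )

    func main() {
    	ticks := 0
    	s := scheduler.NewStepper()
    	done := make(chan struct{})
    	finished := make(chan struct{})

    	// Code under test: do one unit of work per tick.
    	go func() {
    		for {
    			select {
    			case <-s.WaitTick():
    				ticks++
    			case <-done:
    				close(finished)
    				return
    			}
    		}
    	}()

    	// Each Next() deterministically unblocks exactly one WaitTick;
    	// in production the same loop would run off a NewPeriodic ticker.
    	s.Next()
    	s.Next()
    	close(done)
    	<-finished
    	fmt.Println(ticks) // 2
    }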
+ +package scheduler + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type e struct { + count int + at time.Time +} + +type tickRecorder struct { + scheduler Scheduler + count int + done chan struct{} + recorder chan e +} + +func (m *tickRecorder) Start() { + for { + select { + case t := <-m.scheduler.WaitTick(): + m.count = m.count + 1 + m.recorder <- e{count: m.count, at: t} + case <-m.done: + return + } + } +} + +func (m *tickRecorder) Stop() { + close(m.done) +} + +func TestScheduler(t *testing.T) { + t.Run("Step scheduler", testStepScheduler) + t.Run("Periodic scheduler", testPeriodic) + t.Run("PeriodicJitter scheduler", testPeriodicJitter) +} + +func newTickRecorder(scheduler Scheduler) *tickRecorder { + return &tickRecorder{ + scheduler: scheduler, + done: make(chan struct{}), + recorder: make(chan e), + } +} + +func testStepScheduler(t *testing.T) { + t.Run("Trigger the Tick manually", func(t *testing.T) { + scheduler := NewStepper() + defer scheduler.Stop() + + recorder := newTickRecorder(scheduler) + go recorder.Start() + defer recorder.Stop() + + scheduler.Next() + nE := <-recorder.recorder + require.Equal(t, 1, nE.count) + scheduler.Next() + nE = <-recorder.recorder + require.Equal(t, 2, nE.count) + scheduler.Next() + nE = <-recorder.recorder + require.Equal(t, 3, nE.count) + }) +} + +func testPeriodic(t *testing.T) { + t.Run("tick than wait", func(t *testing.T) { + duration := 1 * time.Minute + scheduler := NewPeriodic(duration) + defer scheduler.Stop() + + startedAt := time.Now() + recorder := newTickRecorder(scheduler) + go recorder.Start() + defer recorder.Stop() + + nE := <-recorder.recorder + + require.True(t, nE.at.Sub(startedAt) < duration) + }) + + t.Run("multiple ticks", func(t *testing.T) { + duration := 1 * time.Millisecond + scheduler := NewPeriodic(duration) + defer scheduler.Stop() + + recorder := newTickRecorder(scheduler) + go recorder.Start() + defer recorder.Stop() + + nE := <-recorder.recorder + require.Equal(t, 1, nE.count) + nE = <-recorder.recorder + require.Equal(t, 2, nE.count) + nE = <-recorder.recorder + require.Equal(t, 3, nE.count) + }) +} + +func testPeriodicJitter(t *testing.T) { + t.Run("tick than wait", func(t *testing.T) { + duration := 5 * time.Second + variance := 2 * time.Second + scheduler := NewPeriodicJitter(duration, variance) + defer scheduler.Stop() + + startedAt := time.Now() + recorder := newTickRecorder(scheduler) + go recorder.Start() + defer recorder.Stop() + + nE := <-recorder.recorder + + diff := nE.at.Sub(startedAt) + require.True( + t, + diff < duration, + ) + + startedAt = time.Now() + nE = <-recorder.recorder + diff = nE.at.Sub(startedAt) + require.True( + t, + diff >= duration, + ) + }) + + t.Run("multiple ticks", func(t *testing.T) { + duration := 1 * time.Millisecond + variance := 100 * time.Millisecond + scheduler := NewPeriodicJitter(duration, variance) + defer scheduler.Stop() + + recorder := newTickRecorder(scheduler) + go recorder.Start() + defer recorder.Stop() + + nE := <-recorder.recorder + require.Equal(t, 1, nE.count) + nE = <-recorder.recorder + require.Equal(t, 2, nE.count) + nE = <-recorder.recorder + require.Equal(t, 3, nE.count) + }) + + t.Run("unblock on first tick", func(t *testing.T) { + duration := 30 * time.Minute + variance := 30 * time.Minute + scheduler := NewPeriodicJitter(duration, variance) + + go func() { + // Not a fan of introducing sync-timing-code but + // give us a chance to be waiting. 
<-time.After(500 * time.Millisecond)
+			scheduler.Stop()
+		}()
+
+		<-scheduler.WaitTick()
+	})
+
+	t.Run("unblock on any tick", func(t *testing.T) {
+		duration := 1 * time.Millisecond
+		variance := 2 * time.Second
+		scheduler := NewPeriodicJitter(duration, variance)
+
+		<-scheduler.WaitTick()
+
+		// Increase time between next tick
+		scheduler.d = 20 * time.Minute
+		scheduler.variance = 20 * time.Minute
+
+		go func() {
+			// Not a fan of introducing sync-timing-code, but
+			// it gives us a chance to be waiting.
+			<-time.After(500 * time.Millisecond)
+			scheduler.Stop()
+		}()
+
+		<-scheduler.WaitTick()
+	})
+}
diff --git a/x-pack/agent/pkg/sorted/set.go b/x-pack/agent/pkg/sorted/set.go
new file mode 100644
index 00000000000..739e525aac5
--- /dev/null
+++ b/x-pack/agent/pkg/sorted/set.go
@@ -0,0 +1,60 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package sorted
+
+import (
+	"sort"
+)
+
+// Set is a sorted set that allows iterating over its keys in an ordered manner;
+// when items are added to or removed from the Set, the keys are kept sorted.
+type Set struct {
+	mapped map[string]interface{}
+	keys   []string
+}
+
+// NewSet returns an ordered set.
+func NewSet() *Set {
+	return &Set{
+		mapped: make(map[string]interface{}),
+	}
+}
+
+// Add adds an item to the set.
+func (s *Set) Add(k string, v interface{}) {
+	_, ok := s.mapped[k]
+	if !ok {
+		s.keys = append(s.keys, k)
+		sort.Strings(s.keys)
+	}
+
+	s.mapped[k] = v
+}
+
+// Remove removes an item from the Set.
+func (s *Set) Remove(k string) {
+	_, ok := s.mapped[k]
+	if !ok {
+		return
+	}
+
+	delete(s.mapped, k)
+
+	pos := sort.SearchStrings(s.keys, k)
+	if pos < len(s.keys) && s.keys[pos] == k {
+		s.keys = append(s.keys[:pos], s.keys[pos+1:]...)
+	}
+}
+
+// Get retrieves a specific value from the map and returns false if the key is not found.
+func (s *Set) Get(k string) (interface{}, bool) {
+	v, ok := s.mapped[k]
+	return v, ok
+}
+
+// Keys returns a copy of the keys, ordered alphabetically.
+func (s *Set) Keys() []string {
+	return append(s.keys[:0:0], s.keys...)
+}
diff --git a/x-pack/agent/pkg/sorted/set_test.go b/x-pack/agent/pkg/sorted/set_test.go
new file mode 100644
index 00000000000..77dcf596062
--- /dev/null
+++ b/x-pack/agent/pkg/sorted/set_test.go
@@ -0,0 +1,161 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
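A quick usage sketch of the sorted.Set above, assuming the package path introduced by this patch:

    package main

    import (
    	"fmt"

    	"github.com/elastic/beats/v7/x-pack/agent/pkg/sorted"
    )

    func main() {
    	s := sorted.NewSet()
    	s.Add("x", 1)
    	s.Add("a", 2)
    	s.Add("c", 3)

    	fmt.Println(s.Keys()) // [a c x] -- keys stay sorted on insert

    	s.Remove("c")
    	fmt.Println(s.Keys()) // [a x]

    	if v, ok := s.Get("a"); ok {
    		fmt.Println(v) // 2
    	}
    }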
+ +package sorted + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSet(t *testing.T) { + type kv struct { + k string + v interface{} + } + t.Run("adding items and keep it ordered", func(t *testing.T) { + input := []kv{ + kv{k: "a", v: 1}, + kv{k: "x", v: 1}, + kv{k: "c", v: 1}, + kv{k: "b", v: 1}, + } + + s := NewSet() + + for _, kv := range input { + s.Add(kv.k, kv.v) + } + + expected := []string{ + "a", "b", "c", "x", + } + + require.Equal(t, expected, s.Keys()) + }) + + t.Run("order is preserved when items are removed", func(t *testing.T) { + input := []kv{ + kv{k: "a", v: 1}, + kv{k: "x", v: 1}, + kv{k: "c", v: 1}, + kv{k: "b", v: 1}, + } + + s := NewSet() + + for _, kv := range input { + s.Add(kv.k, kv.v) + } + + expected := []string{ + "a", "b", "x", + } + + s.Remove("c") + + require.Equal(t, expected, s.Keys()) + }) + + t.Run("return true when the key exist", func(t *testing.T) { + s := NewSet() + s.Add("hello", "world") + v, ok := s.Get("hello") + require.True(t, ok) + require.Equal(t, "world", v) + }) + + t.Run("return false when the key dont exist", func(t *testing.T) { + s := NewSet() + v, ok := s.Get("hello") + require.False(t, ok) + require.Equal(t, nil, v) + }) + + t.Run("return false when the key dont exist", func(t *testing.T) { + s := NewSet() + s.Remove("dont-exist") + }) + + t.Run("can remove the last item", func(t *testing.T) { + input := []kv{ + kv{k: "a", v: 1}, + kv{k: "x", v: 1}, + kv{k: "c", v: 1}, + kv{k: "b", v: 1}, + } + + s := NewSet() + + for _, kv := range input { + s.Add(kv.k, kv.v) + } + + expected := []string{ + "a", "b", "c", + } + + s.Remove("x") + + require.Equal(t, expected, s.Keys()) + }) + + t.Run("can remove the only item", func(t *testing.T) { + s := NewSet() + s.Add("hello", "world") + v, ok := s.Get("hello") + require.True(t, ok) + require.Equal(t, "world", v) + s.Remove("hello") + + require.Equal(t, []string{}, s.Keys()) + }) + + t.Run("can remove multiple items", func(t *testing.T) { + input := []kv{ + kv{k: "a", v: 1}, + kv{k: "x", v: 1}, + kv{k: "c", v: 1}, + kv{k: "b", v: 1}, + } + + s := NewSet() + + for _, kv := range input { + s.Add(kv.k, kv.v) + } + + require.Equal(t, []string{"a", "b", "c", "x"}, s.Keys()) + s.Remove("a") + require.Equal(t, []string{"b", "c", "x"}, s.Keys()) + s.Remove("b") + require.Equal(t, []string{"c", "x"}, s.Keys()) + s.Remove("c") + require.Equal(t, []string{"x"}, s.Keys()) + s.Remove("x") + require.Equal(t, []string{}, s.Keys()) + }) + + t.Run("make sure keys() returns a copy", func(t *testing.T) { + input := []kv{ + kv{k: "a", v: 1}, + kv{k: "x", v: 1}, + kv{k: "c", v: 1}, + kv{k: "b", v: 1}, + } + + s := NewSet() + + for _, kv := range input { + s.Add(kv.k, kv.v) + } + + keys := s.Keys() + for _, k := range keys { + require.Equal(t, []string{"a", "b", "c", "x"}, keys) + s.Remove(k) + } + }) +} diff --git a/x-pack/agent/pkg/tokenbucket/token_bucket.go b/x-pack/agent/pkg/tokenbucket/token_bucket.go new file mode 100644 index 00000000000..2caba44ddb2 --- /dev/null +++ b/x-pack/agent/pkg/tokenbucket/token_bucket.go @@ -0,0 +1,82 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
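The last subtest above relies on Keys() returning a copy, which set.go achieves with the full slice expression `s.keys[:0:0]`. A standalone illustration of that idiom, since it is easy to misread:

    package main

    import "fmt"

    func main() {
    	keys := []string{"a", "b", "c"}

    	// keys[:0:0] is a zero-length, zero-capacity slice of the same type,
    	// so the append must allocate a fresh backing array: callers receive
    	// a copy and cannot mutate the original slice through it.
    	cp := append(keys[:0:0], keys...)

    	cp[0] = "z"
    	fmt.Println(keys) // [a b c] -- unchanged
    	fmt.Println(cp)   // [z b c]
    }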
+ +package tokenbucket + +import ( + "fmt" + "time" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/scheduler" +) + +// Bucket is a Token Bucket for rate limiting +type Bucket struct { + size int + dropAmount int + rateChan chan struct{} + closeChan chan struct{} + scheduler scheduler.Scheduler +} + +// NewTokenBucket creates a bucket and starts it. +// size: total size of the bucket +// dropAmount: amount which is dropped per every specified interval +// dropRate: specified interval when drop will happen +func NewTokenBucket(size, dropAmount int, dropRate time.Duration) (*Bucket, error) { + s := scheduler.NewPeriodic(dropRate) + return newTokenBucketWithScheduler(size, dropAmount, s) +} + +func newTokenBucketWithScheduler( + size, dropAmount int, + s scheduler.Scheduler, +) (*Bucket, error) { + if dropAmount > size { + return nil, fmt.Errorf( + "TokenBucket: invalid configuration, size '%d' is lower than drop amount '%d'", + size, + dropAmount, + ) + } + + b := &Bucket{ + dropAmount: dropAmount, + rateChan: make(chan struct{}, size), + closeChan: make(chan struct{}), + scheduler: s, + } + go b.run() + + return b, nil +} + +// Add adds item into a bucket. Add blocks until it is able to add item into a bucket. +func (b *Bucket) Add() { + b.rateChan <- struct{}{} +} + +// Close stops the rate limiting and does not let pass anything anymore. +func (b *Bucket) Close() { + close(b.closeChan) + close(b.rateChan) + b.scheduler.Stop() +} + +// run runs basic loop and consumes configured tokens per every configured period. +func (b *Bucket) run() { + for { + select { + case <-b.scheduler.WaitTick(): + for i := 0; i < b.dropAmount; i++ { + select { + case <-b.rateChan: + default: // do not cumulate drops + } + } + case <-b.closeChan: + return + } + } +} diff --git a/x-pack/agent/pkg/tokenbucket/token_bucket_test.go b/x-pack/agent/pkg/tokenbucket/token_bucket_test.go new file mode 100644 index 00000000000..f1ad6c452a0 --- /dev/null +++ b/x-pack/agent/pkg/tokenbucket/token_bucket_test.go @@ -0,0 +1,104 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package tokenbucket + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/x-pack/agent/pkg/scheduler" +) + +func TestTokenBucket(t *testing.T) { + dropAmount := 1 + bucketSize := 3 + + t.Run("when way below the bucket size it should not block", func(t *testing.T) { + stepper := scheduler.NewStepper() + + b, err := newTokenBucketWithScheduler( + bucketSize, + dropAmount, + stepper, + ) + + assert.NoError(t, err, "initiating a bucket failed") + + // Below the bucket size and should not block. + b.Add() + }) + + t.Run("when below the bucket size it should not block", func(t *testing.T) { + stepper := scheduler.NewStepper() + + b, err := newTokenBucketWithScheduler( + bucketSize, + dropAmount, + stepper, + ) + + assert.NoError(t, err, "initiating a bucket failed") + + // Below the bucket size and should not block. + b.Add() + b.Add() + }) + + t.Run("when we hit the bucket size it should block", func(t *testing.T) { + stepper := scheduler.NewStepper() + + b, err := newTokenBucketWithScheduler( + bucketSize, + dropAmount, + stepper, + ) + + assert.NoError(t, err, "initiating a bucket failed") + + // Same as the bucket size and should block. 
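+	// rateChan is buffered to the bucket size, so these three Adds fill the
+	// buffer without blocking; the next Add blocks until the stepper ticks
+	// and run() drains dropAmount tokens from the channel.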
+ b.Add() + b.Add() + b.Add() + + // Out of bound unblock calls + unblock := func() { + var wg sync.WaitGroup + wg.Add(1) + go func(wg *sync.WaitGroup) { + wg.Done() + + // will unblock the next Add after a second. + <-time.After(1 * time.Second) + stepper.Next() + }(&wg) + wg.Wait() + } + + unblock() + b.Add() // Should block and be unblocked, if not unblock test will timeout. + unblock() + b.Add() // Should block and be unblocked, if not unblock test will timeout. + }) + + t.Run("When we use a timer scheduler we can unblock", func(t *testing.T) { + d := 1 * time.Second + b, err := NewTokenBucket( + bucketSize, + dropAmount, + d, + ) + + assert.NoError(t, err, "initiating a bucket failed") + + // Same as the bucket size and should block. + b.Add() + b.Add() + b.Add() + b.Add() // Should block and be unblocked, if not unblock test will timeout. + }) +} diff --git a/x-pack/agent/proto/remote_config.proto b/x-pack/agent/proto/remote_config.proto new file mode 100644 index 00000000000..7b151a9205f --- /dev/null +++ b/x-pack/agent/proto/remote_config.proto @@ -0,0 +1,22 @@ + +syntax = "proto3"; +package remoteconfig; + +message ConfigRequest { + string config = 1; +} + +message ConfigResponse { +} + +message StatusRequest{ +} + +message StatusResponse { + string Status = 1; +} + +service Configurator { + rpc Config(ConfigRequest) returns (ConfigResponse) {} + rpc Status(StatusRequest) returns (StatusResponse) {} +} diff --git a/x-pack/agent/spec/auditbeat.yml.disabled b/x-pack/agent/spec/auditbeat.yml.disabled new file mode 100644 index 00000000000..78007314feb --- /dev/null +++ b/x-pack/agent/spec/auditbeat.yml.disabled @@ -0,0 +1,34 @@ +name: Auditbeat +cmd: auditbeat +args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false"] +rules: +- filter_values_with_regexp: + key: type + re: ^audit/.+ + selector: streams +- map: + path: streams + rules: + - translate_with_regexp: + path: type + re: ^audit/(.+) + with: $1 +- copy: + from: streams + to: auditbeat +- map: + path: auditbeat.streams + rules: + - rename: + from: type + to: module +- rename: + from: auditbeat.streams + to: modules +- filter: + selectors: + - auditbeat + - output + - keystore +when: HasItems(%{[auditbeat.modules]}) && HasNamespace('output', 'elasticsearch', + 'redis', 'kafka', 'logstash') diff --git a/x-pack/agent/spec/filebeat.yml b/x-pack/agent/spec/filebeat.yml new file mode 100644 index 00000000000..0b098e0ac75 --- /dev/null +++ b/x-pack/agent/spec/filebeat.yml @@ -0,0 +1,42 @@ +name: Filebeat +cmd: filebeat +args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.mode=x-pack-fleet", "-E", "management.enabled=true"] +configurable: grpc +rules: +- map: + path: streams + rules: + - translate: + path: type + mapper: + event/file: log + event/stdin: stdin + event/tcp: tcp + event/udp: udp + log/docker: docker + log/redis_slowlog: redis + log/syslog: syslog +- filter_values: + selector: streams + key: type + values: + - log + - stdin + - udp + - tcp + - docker + - redis + - syslog +- copy: + from: streams + to: filebeat +- rename: + from: filebeat.streams + to: inputs +- filter: + selectors: + - filebeat + - output + - keystore +when: HasItems(%{[filebeat.inputs]}) && HasNamespace('output', 'elasticsearch', 'redis', + 'kafka', 'logstash') diff --git a/x-pack/agent/spec/heartbeat.yml.disabled b/x-pack/agent/spec/heartbeat.yml.disabled new file mode 100644 index 00000000000..14aaa1d3da4 --- /dev/null +++ b/x-pack/agent/spec/heartbeat.yml.disabled @@ -0,0 +1,27 @@ +name: 
Heartbeat +cmd: heartbeat +rules: +- filter_values_with_regexp: + key: type + re: ^monitor/.+ + selector: streams +- map: + path: streams + rules: + - translate_with_regexp: + path: type + re: ^monitor/(?P.+) + with: $type +- copy: + from: streams + to: heartbeat +- rename: + from: heartbeat.streams + to: monitors +- filter: + selectors: + - heartbeat + - output + - keystore +when: HasItems(%{[heartbeat.monitors]}) && HasNamespace('output', 'elasticsearch', + 'redis', 'kafka', 'logstash') diff --git a/x-pack/agent/spec/journalbeat.yml.disabled b/x-pack/agent/spec/journalbeat.yml.disabled new file mode 100644 index 00000000000..ce35cc75aaf --- /dev/null +++ b/x-pack/agent/spec/journalbeat.yml.disabled @@ -0,0 +1,22 @@ +name: Journalbeat +cmd: journalbeat +args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false"] +rules: +- filter_values: + selector: streams + key: type + values: + - log/journal +- copy: + from: streams + to: journalbeat +- rename: + from: journalbeat.streams + to: inputs +- filter: + selectors: + - journalbeat + - output + - keystore +when: HasItems(%{[journalbeat.inputs]}) && HasNamespace('output', 'elasticsearch', + 'redis', 'kafka', 'logstash') diff --git a/x-pack/agent/spec/metricbeat.yml b/x-pack/agent/spec/metricbeat.yml new file mode 100644 index 00000000000..59f1de49a08 --- /dev/null +++ b/x-pack/agent/spec/metricbeat.yml @@ -0,0 +1,32 @@ +name: Metricbeat +cmd: metricbeat +args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.mode=x-pack-fleet", "-E", "management.enabled=true"] +configurable: grpc +rules: +- filter_values_with_regexp: + key: type + re: ^metric/.+ + selector: streams +- map: + path: streams + rules: + - translate_with_regexp: + path: type + re: ^metric/(?P.+) + with: $type + - rename: + from: type + to: module +- copy: + from: streams + to: metricbeat +- rename: + from: metricbeat.streams + to: modules +- filter: + selectors: + - metricbeat + - output + - keystore +when: HasItems(%{[metricbeat.modules]}) && HasNamespace('output', 'elasticsearch', + 'redis', 'kafka', 'logstash') diff --git a/x-pack/filebeat/magefile.go b/x-pack/filebeat/magefile.go index ccdb4de5c6c..1441f3e9ac6 100644 --- a/x-pack/filebeat/magefile.go +++ b/x-pack/filebeat/magefile.go @@ -9,6 +9,7 @@ package main import ( "context" "fmt" + "os" "time" "github.com/magefile/mage/mg" @@ -69,7 +70,12 @@ func Package() { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() - devtools.UseElasticBeatXPackPackaging() + if v, found := os.LookupEnv("AGENT_PACKAGING"); found && v != "" { + devtools.UseElasticBeatXPackReducedPackaging() + } else { + devtools.UseElasticBeatXPackPackaging() + } + devtools.PackageKibanaDashboardsFromBuildDir() filebeat.CustomizePackaging() diff --git a/x-pack/libbeat/cmd/inject.go b/x-pack/libbeat/cmd/inject.go index 9f1fbd48197..4c0f0c1d370 100644 --- a/x-pack/libbeat/cmd/inject.go +++ b/x-pack/libbeat/cmd/inject.go @@ -11,6 +11,8 @@ import ( "github.com/elastic/beats/v7/x-pack/libbeat/licenser" _ "github.com/elastic/beats/v7/x-pack/libbeat/management" + // Register fleet + _ "github.com/elastic/beats/v7/x-pack/libbeat/management/fleet" // register processors _ "github.com/elastic/beats/v7/x-pack/libbeat/processors/add_cloudfoundry_metadata" diff --git a/x-pack/libbeat/management/config.go b/x-pack/libbeat/management/config.go index 7ff40a6a541..1e332c38ab9 100644 --- a/x-pack/libbeat/management/config.go +++ b/x-pack/libbeat/management/config.go @@ -68,11 +68,23 @@ 
const ManagedConfigTemplate = ` #monitoring.elasticsearch: ` -// Config for central management +const ( + // ModeCentralManagement is a default CM mode, using existing processes. + ModeCentralManagement = "x-pack-cm" + + // ModeFleet is a management mode where fleet is used to retrieve configurations. + ModeFleet = "x-pack-fleet" +) + +// Config for central management. type Config struct { // true when enrolled Enabled bool `config:"enabled" yaml:"enabled"` + // Mode specifies whether beat uses Central Management or Fleet. + // Options: [cm, fleet] + Mode string `config:"mode" yaml:"mode"` + // Poll configs period Period time.Duration `config:"period" yaml:"period"` @@ -93,6 +105,7 @@ type EventReporterConfig struct { func defaultConfig() *Config { return &Config{ + Mode: ModeCentralManagement, Period: 60 * time.Second, EventsReporter: EventReporterConfig{ Period: 30 * time.Second, @@ -111,7 +124,7 @@ type templateParams struct { BeatName string } -// OverwriteConfigFile will overwrite beat settings file with the enrolled template +// OverwriteConfigFile will overwrite beat settings file with the enrolled template. func (c *Config) OverwriteConfigFile(wr io.Writer, beatName string) error { t := template.Must(template.New("beat.management.yml").Parse(ManagedConfigTemplate)) diff --git a/x-pack/libbeat/management/error.go b/x-pack/libbeat/management/error.go index ab320159396..31fd8df3d08 100644 --- a/x-pack/libbeat/management/error.go +++ b/x-pack/libbeat/management/error.go @@ -103,6 +103,7 @@ func (er *Errors) IsEmpty() bool { return len(*er) == 0 } -func newConfigError(err error) *Error { +// NewConfigError wraps an error to be a management error of a specific ConfigError Type +func NewConfigError(err error) *Error { return &Error{Type: ConfigError, Err: err} } diff --git a/x-pack/libbeat/management/error_test.go b/x-pack/libbeat/management/error_test.go index 1416199c019..4d29d3aacad 100644 --- a/x-pack/libbeat/management/error_test.go +++ b/x-pack/libbeat/management/error_test.go @@ -54,14 +54,14 @@ func TestErrorSerialization(t *testing.T) { func TestErrors(t *testing.T) { t.Run("single error", func(t *testing.T) { - errors := Errors{newConfigError(errors.New("error1"))} + errors := Errors{NewConfigError(errors.New("error1"))} assert.Equal(t, "1 error: error1", errors.Error()) }) t.Run("multiple errors", func(t *testing.T) { errors := Errors{ - newConfigError(errors.New("error1")), - newConfigError(errors.New("error2")), + NewConfigError(errors.New("error1")), + NewConfigError(errors.New("error2")), } assert.Equal(t, "2 errors: error1; error2", errors.Error()) }) diff --git a/x-pack/libbeat/management/fleet/config.go b/x-pack/libbeat/management/fleet/config.go new file mode 100644 index 00000000000..9572d30acc9 --- /dev/null +++ b/x-pack/libbeat/management/fleet/config.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
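The new Mode setting selects between the existing central management and the fleet manager; the fleet spec files above pass it as `-E management.mode=x-pack-fleet`. A minimal sketch of how such a setting is unpacked through libbeat's config machinery, assuming the exported constants from this patch:

    package main

    import (
    	"fmt"

    	"github.com/elastic/beats/v7/libbeat/common"
    	xmanagement "github.com/elastic/beats/v7/x-pack/libbeat/management"
    )

    func main() {
    	// Equivalent to: -E management.enabled=true -E management.mode=x-pack-fleet
    	cfg, err := common.NewConfigFrom(map[string]interface{}{
    		"enabled": true,
    		"mode":    xmanagement.ModeFleet,
    	})
    	if err != nil {
    		panic(err)
    	}

    	settings := struct {
    		Mode string `config:"mode"`
    	}{Mode: xmanagement.ModeCentralManagement} // default when unset
    	if err := cfg.Unpack(&settings); err != nil {
    		panic(err)
    	}
    	fmt.Println(settings.Mode) // x-pack-fleet
    }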
+ +package fleet + +import ( + xmanagement "github.com/elastic/beats/v7/x-pack/libbeat/management" +) + +// Config for central management +type Config struct { + Enabled bool `config:"enabled" yaml:"enabled"` + Mode string `config:"mode" yaml:"mode"` + Blacklist xmanagement.ConfigBlacklistSettings `config:"blacklist" yaml:"blacklist"` +} + +func defaultConfig() *Config { + return &Config{ + Mode: xmanagement.ModeCentralManagement, + Blacklist: xmanagement.ConfigBlacklistSettings{ + Patterns: map[string]string{ + "output": "console|file", + }, + }, + } +} diff --git a/x-pack/libbeat/management/fleet/config_server.go b/x-pack/libbeat/management/fleet/config_server.go new file mode 100644 index 00000000000..9373e453d91 --- /dev/null +++ b/x-pack/libbeat/management/fleet/config_server.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/remoteconfig/grpc" +) + +const ( + defaultTimeout = 15 * time.Second +) + +// Server is a server for handling communication between +// beat and Elastic Agent. +type Server struct { + configChan chan<- map[string]interface{} +} + +// NewConfigServer creates a new grpc configuration server for receiving +// configurations from Elastic Agent. +func NewConfigServer(configChan chan<- map[string]interface{}) *Server { + return &Server{ + configChan: configChan, + } +} + +// Config is a handler of a call made by agent pushing latest configuration. +func (s *Server) Config(ctx context.Context, req *grpc.ConfigRequest) (*grpc.ConfigResponse, error) { + cfgString := req.GetConfig() + + var configMap common.MapStr + uconfig, err := common.NewConfigFrom(cfgString) + if err != nil { + return &grpc.ConfigResponse{}, fmt.Errorf("config blocks unsuccessfully generated: %+v", err) + } + + err = uconfig.Unpack(&configMap) + if err != nil { + return &grpc.ConfigResponse{}, fmt.Errorf("config blocks unsuccessfully generated: %+v", err) + } + + select { + case s.configChan <- configMap: + case <-time.After(defaultTimeout): + return &grpc.ConfigResponse{}, errors.New("failed to push configuration: Timeout") + } + return &grpc.ConfigResponse{}, nil +} + +// Status returns OK. +func (s *Server) Status(ctx context.Context, req *grpc.StatusRequest) (*grpc.StatusResponse, error) { + return &grpc.StatusResponse{Status: "ok"}, nil +} diff --git a/x-pack/libbeat/management/fleet/manager.go b/x-pack/libbeat/management/fleet/manager.go new file mode 100644 index 00000000000..2f878206e6c --- /dev/null +++ b/x-pack/libbeat/management/fleet/manager.go @@ -0,0 +1,279 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
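Server.Config above hands the parsed configuration to the manager over configChan, giving up after defaultTimeout if the worker is not receiving. A simplified sketch of that hand-off, with hypothetical push and worker names:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // push mirrors Server.Config: hand the parsed configuration to the
    // manager, but give up if the worker does not pick it up in time.
    func push(configChan chan<- map[string]interface{}, cfg map[string]interface{}, timeout time.Duration) error {
    	select {
    	case configChan <- cfg:
    		return nil
    	case <-time.After(timeout):
    		return errors.New("failed to push configuration: timeout")
    	}
    }

    func main() {
    	ch := make(chan map[string]interface{})
    	done := make(chan struct{})

    	// Worker side (cm.worker in the manager): receive and apply.
    	go func() {
    		cfg := <-ch
    		fmt.Println("applying", len(cfg), "top-level keys")
    		close(done)
    	}()

    	if err := push(ch, map[string]interface{}{"filebeat": nil, "output": nil}, time.Second); err != nil {
    		fmt.Println(err)
    	}
    	<-done
    }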
+ +package fleet + +import ( + "fmt" + "os" + "sort" + "sync" + + "github.com/gofrs/uuid" + "github.com/pkg/errors" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" + "github.com/elastic/beats/v7/libbeat/common/reload" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/management" + "github.com/elastic/beats/v7/x-pack/agent/pkg/core/plugin/server" + "github.com/elastic/beats/v7/x-pack/libbeat/management/api" + + xmanagement "github.com/elastic/beats/v7/x-pack/libbeat/management" +) + +// ConfigManager provides a functionality to retrieve config channel +// using which manager is informed about config changes. +type ConfigManager interface { + ConfigChan() chan<- map[string]interface{} +} + +// Manager handles internal config updates. By retrieving +// new configs from Kibana and applying them to the Beat. +type Manager struct { + config *Config + logger *logp.Logger + beatUUID uuid.UUID + done chan struct{} + registry *reload.Registry + wg sync.WaitGroup + blacklist *xmanagement.ConfigBlacklist + + configChan chan map[string]interface{} +} + +// NewFleetManager returns a X-Pack Beats Fleet Management manager. +func NewFleetManager(config *common.Config, registry *reload.Registry, beatUUID uuid.UUID) (management.ConfigManager, error) { + c := defaultConfig() + if config.Enabled() { + if err := config.Unpack(&c); err != nil { + return nil, errors.Wrap(err, "parsing fleet management settings") + } + } + return NewFleetManagerWithConfig(c, registry, beatUUID) +} + +// NewFleetManagerWithConfig returns a X-Pack Beats Fleet Management manager. +func NewFleetManagerWithConfig(c *Config, registry *reload.Registry, beatUUID uuid.UUID) (management.ConfigManager, error) { + var blacklist *xmanagement.ConfigBlacklist + + if c.Enabled && c.Mode == xmanagement.ModeFleet { + var err error + + // Initialize configs blacklist + blacklist, err = xmanagement.NewConfigBlacklist(c.Blacklist) + if err != nil { + return nil, errors.Wrap(err, "wrong settings for configurations blacklist") + } + } + + log := logp.NewLogger(management.DebugK) + + m := &Manager{ + config: c, + blacklist: blacklist, + logger: log.Named("fleet"), + done: make(chan struct{}), + beatUUID: beatUUID, + registry: registry, + configChan: make(chan map[string]interface{}), + } + + go m.startGrpcServer() + + return m, nil +} + +// Enabled returns true if config management is enabled. +func (cm *Manager) Enabled() bool { + return cm.config.Enabled && cm.config.Mode == xmanagement.ModeFleet +} + +// ConfigChan returns a channel used to communicate configuration changes. +func (cm *Manager) ConfigChan() chan<- map[string]interface{} { + return cm.configChan +} + +// Start the config manager +func (cm *Manager) Start() { + if !cm.Enabled() { + return + } + + cfgwarn.Beta("Fleet management is enabled") + cm.logger.Info("Starting fleet management service") + + cm.wg.Add(1) + go cm.worker() +} + +// Stop the config manager +func (cm *Manager) Stop() { + if !cm.Enabled() { + return + } + + // stop collecting configuration + cm.logger.Info("Stopping fleet management service") + close(cm.done) + cm.wg.Wait() +} + +// CheckRawConfig check settings are correct to start the beat. This method +// checks there are no collision between the existing configuration and what +// fleet management can configure. 
+func (cm *Manager) CheckRawConfig(cfg *common.Config) error { + // TODO implement this method + return nil +} + +func (cm *Manager) worker() { + defer cm.wg.Done() + + // Start worker loop: fetch + apply new settings +WORKERLOOP: + for { + select { + case cfg := <-cm.configChan: + blocks, err := cm.toConfigBlocks(cfg) + if err != nil { + cm.logger.Errorf("Could not apply the configuration, error: %+v", err) + continue WORKERLOOP + } + + if errs := cm.apply(blocks); !errs.IsEmpty() { + cm.logger.Errorf("Could not apply the configuration, error: %+v", errs) + continue WORKERLOOP + } + case <-cm.done: + return + } + } +} + +func (cm *Manager) apply(blocks api.ConfigBlocks) xmanagement.Errors { + var errors xmanagement.Errors + missing := map[string]bool{} + for _, name := range cm.registry.GetRegisteredNames() { + missing[name] = true + } + + // Detect unwanted configs from the list + if errs := cm.blacklist.Detect(blocks); !errs.IsEmpty() { + errors = append(errors, errs...) + return errors + } + + // Reload configs + for _, b := range blocks { + if err := cm.reload(b.Type, b.Blocks); err != nil { + errors = append(errors, err) + } + missing[b.Type] = false + } + + // Unset missing configs + for name := range missing { + if missing[name] { + if err := cm.reload(name, []*api.ConfigBlock{}); err != nil { + errors = append(errors, err) + } + } + } + + return errors +} + +func (cm *Manager) reload(t string, blocks []*api.ConfigBlock) *xmanagement.Error { + cm.logger.Infof("Applying settings for %s", t) + if obj := cm.registry.GetReloadable(t); obj != nil { + // Single object + if len(blocks) > 1 { + err := fmt.Errorf("got an invalid number of configs for %s: %d, expected: 1", t, len(blocks)) + cm.logger.Error(err) + return xmanagement.NewConfigError(err) + } + + var config *reload.ConfigWithMeta + var err error + if len(blocks) == 1 { + config, err = blocks[0].ConfigWithMeta() + if err != nil { + cm.logger.Error(err) + return xmanagement.NewConfigError(err) + } + } + + if err := obj.Reload(config); err != nil { + cm.logger.Error(err) + return xmanagement.NewConfigError(err) + } + } else if obj := cm.registry.GetReloadableList(t); obj != nil { + // List + var configs []*reload.ConfigWithMeta + for _, block := range blocks { + config, err := block.ConfigWithMeta() + if err != nil { + cm.logger.Error(err) + return xmanagement.NewConfigError(err) + } + configs = append(configs, config) + } + + if err := obj.Reload(configs); err != nil { + cm.logger.Error(err) + return xmanagement.NewConfigError(err) + } + } + + return nil +} + +func (cm *Manager) toConfigBlocks(cfg common.MapStr) (api.ConfigBlocks, error) { + blocks := map[string][]*api.ConfigBlock{} + + // Extract all registered values beat can respond to + for _, regName := range cm.registry.GetRegisteredNames() { + iBlock, err := cfg.GetValue(regName) + if err != nil { + continue + } + + if mapBlock, ok := iBlock.(map[string]interface{}); ok { + blocks[regName] = append(blocks[regName], &api.ConfigBlock{Raw: mapBlock}) + } else if arrayBlock, ok := iBlock.([]interface{}); ok { + for _, item := range arrayBlock { + if mapBlock, ok := item.(map[string]interface{}); ok { + blocks[regName] = append(blocks[regName], &api.ConfigBlock{Raw: mapBlock}) + } + } + } + } + + // keep the ordering consistent while grouping the items. 
+ keys := make([]string, 0, len(blocks)) + for k := range blocks { + keys = append(keys, k) + } + sort.Strings(keys) + + res := api.ConfigBlocks{} + for _, t := range keys { + b := blocks[t] + res = append(res, api.ConfigBlocksWithType{Type: t, Blocks: b}) + } + + return res, nil +} + +func (cm *Manager) startGrpcServer() { + cm.logger.Info("initiating fleet config manager") + s := NewConfigServer(cm.ConfigChan()) + if err := server.NewGrpcServer(os.Stdin, s); err != nil { + panic(err) + } +} + +var _ ConfigManager = &Manager{} diff --git a/x-pack/libbeat/management/fleet/manager_test.go b/x-pack/libbeat/management/fleet/manager_test.go new file mode 100644 index 00000000000..7af72a04291 --- /dev/null +++ b/x-pack/libbeat/management/fleet/manager_test.go @@ -0,0 +1,60 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "testing" + + "github.com/elastic/beats/v7/libbeat/common" + + "github.com/elastic/beats/v7/libbeat/common/reload" +) + +func TestConfigBlocks(t *testing.T) { + input := ` +filebeat: + inputs: + - type: log + paths: + - /var/log/hello1.log + - /var/log/hello2.log +output: + elasticsearch: + hosts: + - localhost:9200` + + var cfg common.MapStr + uconfig, err := common.NewConfigFrom(input) + if err != nil { + t.Fatalf("Config blocks unsuccessfully generated: %+v", err) + } + + err = uconfig.Unpack(&cfg) + if err != nil { + t.Fatalf("Config blocks unsuccessfully generated: %+v", err) + } + + reg := reload.NewRegistry() + reg.Register("output", &dummyReloadable{}) + reg.Register("filebeat.inputs", &dummyReloadable{}) + + cm := &Manager{ + registry: reg, + } + blocks, err := cm.toConfigBlocks(cfg) + if err != nil { + t.Fatalf("Config blocks unsuccessfully generated: %+v", err) + } + + if len(blocks) != 2 { + t.Fatalf("Expected 2 block have %d: %+v", len(blocks), blocks) + } +} + +type dummyReloadable struct{} + +func (dummyReloadable) Reload(config *reload.ConfigWithMeta) error { + return nil +} diff --git a/x-pack/libbeat/management/fleet/plugin.go b/x-pack/libbeat/management/fleet/plugin.go new file mode 100644 index 00000000000..edb12ca78f1 --- /dev/null +++ b/x-pack/libbeat/management/fleet/plugin.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
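The test above checks that toConfigBlocks picks out only the sections the Beat registered reloadables for, in a stable order. A much-simplified sketch of that grouping step; groupBlocks is hypothetical and flattens the dotted-path lookup the real method performs via cfg.GetValue:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // groupBlocks keeps only the registered sections present in the config
    // and returns them in a stable (sorted) order.
    func groupBlocks(cfg map[string]interface{}, registered []string) []string {
    	var types []string
    	for _, name := range registered {
    		if _, ok := cfg[name]; ok {
    			types = append(types, name)
    		}
    	}
    	sort.Strings(types)
    	return types
    }

    func main() {
    	cfg := map[string]interface{}{
    		"filebeat.inputs": []interface{}{map[string]interface{}{"type": "log"}},
    		"output":          map[string]interface{}{"elasticsearch": nil},
    	}
    	fmt.Println(groupBlocks(cfg, []string{"output", "filebeat.inputs"}))
    	// [filebeat.inputs output]
    }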
+ +package fleet + +import ( + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/feature" + "github.com/elastic/beats/v7/libbeat/management" + xmanagement "github.com/elastic/beats/v7/x-pack/libbeat/management" +) + +func init() { + management.Register("x-pack-fleet", NewFleetManagerPlugin, feature.Beta) +} + +// NewFleetManagerPlugin creates a plugin function returning factory if configuration matches the criteria +func NewFleetManagerPlugin(config *common.Config) management.FactoryFunc { + c := defaultConfig() + if config.Enabled() { + if err := config.Unpack(&c); err != nil { + return nil + } + + if c.Mode == xmanagement.ModeFleet { + return NewFleetManager + } + } + + return nil +} diff --git a/x-pack/libbeat/management/manager.go b/x-pack/libbeat/management/manager.go index 66fdabe9d99..74770ef424a 100644 --- a/x-pack/libbeat/management/manager.go +++ b/x-pack/libbeat/management/manager.go @@ -10,7 +10,6 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/common/reload" - "github.com/elastic/beats/v7/libbeat/feature" "github.com/gofrs/uuid" @@ -26,10 +25,6 @@ import ( var errEmptyAccessToken = errors.New("access_token is empty, you must reenroll your Beat") -func init() { - management.Register("x-pack", NewConfigManager, feature.Beta) -} - // ConfigManager handles internal config updates. By retrieving // new configs from Kibana and applying them to the Beat type ConfigManager struct { @@ -284,7 +279,7 @@ func (cm *ConfigManager) reload(t string, blocks []*api.ConfigBlock) *Error { if len(blocks) > 1 { err := fmt.Errorf("got an invalid number of configs for %s: %d, expected: 1", t, len(blocks)) cm.logger.Error(err) - return newConfigError(err) + return NewConfigError(err) } var config *reload.ConfigWithMeta @@ -293,13 +288,13 @@ func (cm *ConfigManager) reload(t string, blocks []*api.ConfigBlock) *Error { config, err = blocks[0].ConfigWithMeta() if err != nil { cm.logger.Error(err) - return newConfigError(err) + return NewConfigError(err) } } if err := obj.Reload(config); err != nil { cm.logger.Error(err) - return newConfigError(err) + return NewConfigError(err) } } else if obj := cm.registry.GetReloadableList(t); obj != nil { // List @@ -308,14 +303,14 @@ func (cm *ConfigManager) reload(t string, blocks []*api.ConfigBlock) *Error { config, err := block.ConfigWithMeta() if err != nil { cm.logger.Error(err) - return newConfigError(err) + return NewConfigError(err) } configs = append(configs, config) } if err := obj.Reload(configs); err != nil { cm.logger.Error(err) - return newConfigError(err) + return NewConfigError(err) } } diff --git a/x-pack/libbeat/management/manager_test.go b/x-pack/libbeat/management/manager_test.go index 90370286d14..af0c8816ebe 100644 --- a/x-pack/libbeat/management/manager_test.go +++ b/x-pack/libbeat/management/manager_test.go @@ -73,6 +73,7 @@ func TestConfigManager(t *testing.T) { config := &Config{ Enabled: true, + Mode: ModeCentralManagement, Period: 100 * time.Millisecond, Kibana: c, AccessToken: accessToken, @@ -148,6 +149,7 @@ func TestRemoveItems(t *testing.T) { config := &Config{ Enabled: true, + Mode: ModeCentralManagement, Period: 100 * time.Millisecond, Kibana: c, AccessToken: accessToken, @@ -225,6 +227,7 @@ func TestUnEnroll(t *testing.T) { config := &Config{ Enabled: true, + Mode: ModeCentralManagement, Period: 100 * time.Millisecond, Kibana: c, AccessToken: accessToken, @@ -299,6 +302,7 @@ func TestBadConfig(t *testing.T) { config := &Config{ Enabled: true, + Mode: ModeCentralManagement, Period: 100 * 
time.Millisecond, Kibana: c, AccessToken: accessToken, diff --git a/x-pack/libbeat/management/plugin.go b/x-pack/libbeat/management/plugin.go new file mode 100644 index 00000000000..b1e7b7d13b9 --- /dev/null +++ b/x-pack/libbeat/management/plugin.go @@ -0,0 +1,31 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package management + +import ( + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/feature" + "github.com/elastic/beats/v7/libbeat/management" +) + +func init() { + management.Register("x-pack", NewManagerPlugin, feature.Beta) +} + +// NewManagerPlugin creates a plugin function returning factory if configuration matches the criteria +func NewManagerPlugin(config *common.Config) management.FactoryFunc { + c := defaultConfig() + if config.Enabled() { + if err := config.Unpack(&c); err != nil { + return nil + } + + if c.Mode == ModeCentralManagement { + return NewConfigManager + } + } + + return nil +} diff --git a/x-pack/metricbeat/magefile.go b/x-pack/metricbeat/magefile.go index 5e30c711cc4..86572c9be90 100644 --- a/x-pack/metricbeat/magefile.go +++ b/x-pack/metricbeat/magefile.go @@ -9,6 +9,7 @@ package main import ( "context" "fmt" + "os" "time" "github.com/magefile/mage/mg" @@ -69,7 +70,12 @@ func Package() { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() - devtools.UseElasticBeatXPackPackaging() + if v, found := os.LookupEnv("AGENT_PACKAGING"); found && v != "" { + devtools.UseElasticBeatXPackReducedPackaging() + } else { + devtools.UseElasticBeatXPackPackaging() + } + metricbeat.CustomizePackaging() devtools.PackageKibanaDashboardsFromBuildDir()