From c053a322bf73e0c89bd2f404becee6786bd1df15 Mon Sep 17 00:00:00 2001
From: Mathieu Martin
Date: Thu, 20 Dec 2018 14:30:22 -0500
Subject: [PATCH] Convert Filebeat kafka.* to ECS (#9297)

- Map 2 kafka.* fields to ECS:
  - kafka.log.level => log.level
  - kafka.log.message => message
---
 CHANGELOG.asciidoc                             |   1 +
 dev-tools/ecs-migration.yml                    |  10 ++
 filebeat/docs/fields.asciidoc                  |  18 +---
 filebeat/module/kafka/fields.go                |   2 +-
 filebeat/module/kafka/log/_meta/fields.yml     |  16 ++-
 .../module/kafka/log/ingest/pipeline.json      |   6 +-
 .../log/test/controller.log-expected.json      | 100 +++++++-----------
 .../kafka/log/test/server.log-expected.json    | 100 +++++++-----------
 .../test/state-change-1.1.0.log-expected.json  |   5 +-
 .../test/state-change-2.0.0.log-expected.json  |   5 +-
 .../log/test/state-change.log-expected.json    |   5 +-
 11 files changed, 112 insertions(+), 156 deletions(-)

diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 6b6076f65d5c..a7910878cd5c 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -182,6 +182,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha1...v7.0.0-alpha2[Check the
 - Rename many `redis.log.*` fields to map to ECS. {pull}9315[9315]
 - Rename many `icinga.*` fields to map to ECS. {pull}9294[9294]
 - Rename many `postgresql.log.*` fields to map to ECS. {pull}9303[9303]
+- Rename many `kafka.log.*` fields to map to ECS. {pull}9297[9297]
 
 *Metricbeat*
 
diff --git a/dev-tools/ecs-migration.yml b/dev-tools/ecs-migration.yml
index d7d0cd809311..0d038792b9c9 100644
--- a/dev-tools/ecs-migration.yml
+++ b/dev-tools/ecs-migration.yml
@@ -376,6 +376,16 @@
   to: network.forwarded_ip
   alias: true
 
+## Kafka module
+
+- from: kafka.log.level
+  to: log.level
+  alias: true
+
+- from: kafka.log.message
+  to: message
+  alias: true
+
 ## NGINX module
 
 - from: nginx.access.user_name
diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc
index e1ac0b5811f1..e960e166be05 100644
--- a/filebeat/docs/fields.asciidoc
+++ b/filebeat/docs/fields.asciidoc
@@ -5444,31 +5444,21 @@ Kafka log lines.
 
 
 
 
 
-*`kafka.log.timestamp`*::
-+
---
-The timestamp from the log line.
-
-
---
-
 *`kafka.log.level`*::
 +
 --
-example: WARN
-
-The log level.
+type: alias
+alias to: log.level
 
 --
 
 *`kafka.log.message`*::
 +
 --
-type: text
-
-The logged message.
+type: alias
+alias to: message
 
 --
 
diff --git a/filebeat/module/kafka/fields.go b/filebeat/module/kafka/fields.go
index 54dc41a690de..c76d549e7b16 100644
--- a/filebeat/module/kafka/fields.go
+++ b/filebeat/module/kafka/fields.go
@@ -31,5 +31,5 @@ func init() {
 
 // Asset returns asset data
 func Asset() string {
-	return "eJysk81uwjAQhO95ihF3eIAcKlW9tWoPFVLPFtkEK/6TvVB4+8px+XNNIBV7i1f+ZjK7nqOnfY1etL2oAJasqMbsLX7PKqChsPLSsbSmxlMFAEMP2jYbRRXQSlJNqIfWHEZoOuFi8d5Rjc7bjfs9KTAvMecoZbvjWQl2FZgqmVW2g5KGwuKsmSueq7LUFFhod9EdVYq1XNPpKlpvNXhNR/lFUUvRllSGop3QbpjE1/Pnx2y6i0EygsuamkIQHWVXU7pMO/6XYEfNAVxWXVntrCGT45NuT/tv65tp0i8H5DFoGaKONN2Q/xUjSoTwoJ9/FVuRgJMssBercv75dt/hAVhGHKQZ2bfyxt+KBTfmc5e7PylxsjuSE25uK8aGdqct4D2x4YRn2Pbk7rqddqPy9/oYL/EdRfghnnyaPwEAAP//nnhlvw=="
+	return "eJysk09uszAQxfec4in7cAAW3+bbteouFxiFgVgY27KHtLl9Zdz8c51ApM4OD37vx5thi4FPDQbqBqoAUaK5weY9Pm8qoOWw98qJsqbBvwoA5h5G206aK6BTrNvQzK0tDI18lYslJ8cNem8n93NS0LyXuZXStr+clcQeCqZKsNr20MpwqG+aueOdKx9Z33XO3qQVhazjSA4zaV26N6reU2ITP3HRb+QQqOcXHcu3HvkVjfd2dNawkaL1wKdP69us9yTuWP/PkpADz9GrEH2U6dF5O9ZlEE0h/8oEIfyV0y0QvNGRkuBLCOJpXx5BvnArGIBdlIMyF4a4gXX2YmkJl2LBwnxW0f1KSRLuk5ywuLB4NrSVWMBH0oYjL7Ddle4xTjfp/Lf7G5bdgWfxczz5NL8DAAD//1wZSZY="
 }
diff --git a/filebeat/module/kafka/log/_meta/fields.yml b/filebeat/module/kafka/log/_meta/fields.yml
index e9d234cb625e..76d9467bb044 100644
--- a/filebeat/module/kafka/log/_meta/fields.yml
+++ b/filebeat/module/kafka/log/_meta/fields.yml
@@ -3,17 +3,15 @@
   description: >
     Kafka log lines.
   fields:
-    - name: timestamp
-      description: >
-        The timestamp from the log line.
     - name: level
-      example: "WARN"
-      description: >
-        The log level.
+      type: alias
+      path: log.level
+      migration: true
     - name: message
-      type: text
-      description: >
-        The logged message.
+      type: alias
+      path: message
+      migration: true
+
     - name: component
       type: keyword
       description: >
diff --git a/filebeat/module/kafka/log/ingest/pipeline.json b/filebeat/module/kafka/log/ingest/pipeline.json
index 4f38d6cd839e..c975b00602da 100644
--- a/filebeat/module/kafka/log/ingest/pipeline.json
+++ b/filebeat/module/kafka/log/ingest/pipeline.json
@@ -6,18 +6,18 @@
         "field": "message",
         "trace_match": true,
         "patterns": [
-          "(?m)%{TIMESTAMP_ISO8601:kafka.log.timestamp}. %{LOGLEVEL:kafka.log.level} +%{JAVALOGMESSAGE:kafka.log.message} \\(%{JAVACLASS:kafka.log.class}\\)$[ \\n]*(?'kafka.log.trace.full'.*)"
+          "(?m)%{TIMESTAMP_ISO8601:kafka.log.timestamp}. %{LOGLEVEL:log.level} +%{JAVALOGMESSAGE:message} \\(%{JAVACLASS:kafka.log.class}\\)$[ \\n]*(?'kafka.log.trace.full'.*)"
         ]
       }
     },
     {
       "grok": {
-        "field": "kafka.log.message",
+        "field": "message",
         "pattern_definitions": {
           "KAFKA_COMPONENT": "[^\\]]*"
         },
         "patterns": [
-          "\\[%{KAFKA_COMPONENT:kafka.log.component}\\][,:.]? +%{JAVALOGMESSAGE:kafka.log.message}"
+          "\\[%{KAFKA_COMPONENT:kafka.log.component}\\][,:.]? 
+%{JAVALOGMESSAGE:message}" ], "on_failure": [ { diff --git a/filebeat/module/kafka/log/test/controller.log-expected.json b/filebeat/module/kafka/log/test/controller.log-expected.json index 2c8f20b4fe8b..73f5b99b1c83 100644 --- a/filebeat/module/kafka/log/test/controller.log-expected.json +++ b/filebeat/module/kafka/log/test/controller.log-expected.json @@ -6,10 +6,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", "kafka.log.component": "controller-event-thread", - "kafka.log.level": "INFO", - "kafka.log.message": "Starting", + "log.level": "INFO", "log.offset": 0, - "message": "[2017-08-04 10:48:21,048] INFO [controller-event-thread]: Starting (kafka.controller.ControllerEventManager$ControllerEventThread)" + "message": "Starting" }, { "@timestamp": "2017-08-04T10:48:21.063Z", @@ -18,10 +17,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "0 successfully elected as the controller", + "log.level": "INFO", "log.offset": 131, - "message": "[2017-08-04 10:48:21,063] INFO [Controller 0]: 0 successfully elected as the controller (kafka.controller.KafkaController)" + "message": "0 successfully elected as the controller" }, { "@timestamp": "2017-08-04T10:48:21.064Z", @@ -30,10 +28,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Broker 0 starting become controller state transition", + "log.level": "INFO", "log.offset": 254, - "message": "[2017-08-04 10:48:21,064] INFO [Controller 0]: Broker 0 starting become controller state transition (kafka.controller.KafkaController)" + "message": "Broker 0 starting become controller state transition" }, { "@timestamp": "2017-08-04T10:48:21.082Z", @@ -42,10 +39,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Controller 0 incremented epoch to 1", + "log.level": "INFO", "log.offset": 389, - "message": "[2017-08-04 10:48:21,082] INFO [Controller 0]: Controller 0 incremented epoch to 1 (kafka.controller.KafkaController)" + "message": "Controller 0 incremented epoch to 1" }, { "@timestamp": "2017-08-04T10:48:21.085Z", @@ -54,10 +50,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "DEBUG", - "kafka.log.message": "Registering IsrChangeNotificationListener", + "log.level": "DEBUG", "log.offset": 507, - "message": "[2017-08-04 10:48:21,085] DEBUG [Controller 0]: Registering IsrChangeNotificationListener (kafka.controller.KafkaController)" + "message": "Registering IsrChangeNotificationListener" }, { "@timestamp": "2017-08-04T10:48:21.154Z", @@ -66,10 +61,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.ReplicaStateMachine", "kafka.log.component": "Replica state machine on controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Started replica state machine with initial state -> Map()", + "log.level": "INFO", "log.offset": 632, - "message": "[2017-08-04 10:48:21,154] INFO [Replica state machine on controller 0]: Started replica state machine with initial state -> Map() (kafka.controller.ReplicaStateMachine)" + "message": "Started replica state machine with initial state -> Map()" }, { "@timestamp": "2017-08-04T10:48:21.156Z", @@ 
-78,10 +72,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.PartitionStateMachine", "kafka.log.component": "Partition state machine on Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Started partition state machine with initial state -> Map()", + "log.level": "INFO", "log.offset": 801, - "message": "[2017-08-04 10:48:21,156] INFO [Partition state machine on Controller 0]: Started partition state machine with initial state -> Map() (kafka.controller.PartitionStateMachine)" + "message": "Started partition state machine with initial state -> Map()" }, { "@timestamp": "2017-08-04T10:48:21.157Z", @@ -90,10 +83,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Broker 0 is ready to serve as the new controller with epoch 1", + "log.level": "INFO", "log.offset": 976, - "message": "[2017-08-04 10:48:21,157] INFO [Controller 0]: Broker 0 is ready to serve as the new controller with epoch 1 (kafka.controller.KafkaController)" + "message": "Broker 0 is ready to serve as the new controller with epoch 1" }, { "@timestamp": "2017-08-04T10:48:21.165Z", @@ -102,10 +94,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.PartitionStateMachine", "kafka.log.component": "Partition state machine on Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Invoking state change to OnlinePartition for partitions ", + "log.level": "INFO", "log.offset": 1120, - "message": "[2017-08-04 10:48:21,165] INFO [Partition state machine on Controller 0]: Invoking state change to OnlinePartition for partitions (kafka.controller.PartitionStateMachine)" + "message": "Invoking state change to OnlinePartition for partitions " }, { "@timestamp": "2017-08-04T11:44:22.588Z", @@ -114,10 +105,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "DEBUG", - "kafka.log.message": "Live brokers: ", + "log.level": "DEBUG", "log.offset": 1292, - "message": "[2017-08-04 11:44:22,588] DEBUG [Controller 0]: Live brokers: (kafka.controller.KafkaController)" + "message": "Live brokers: " }, { "@timestamp": "2017-08-04T11:44:25.094Z", @@ -126,10 +116,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", "kafka.log.component": "controller-event-thread", - "kafka.log.level": "INFO", - "kafka.log.message": "Shutting down", + "log.level": "INFO", "log.offset": 1390, - "message": "[2017-08-04 11:44:25,094] INFO [controller-event-thread]: Shutting down (kafka.controller.ControllerEventManager$ControllerEventThread)" + "message": "Shutting down" }, { "@timestamp": "2017-08-04T11:44:25.095Z", @@ -138,10 +127,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", "kafka.log.component": "controller-event-thread", - "kafka.log.level": "INFO", - "kafka.log.message": "Stopped", + "log.level": "INFO", "log.offset": 1526, - "message": "[2017-08-04 11:44:25,095] INFO [controller-event-thread]: Stopped (kafka.controller.ControllerEventManager$ControllerEventThread)" + "message": "Stopped" }, { "@timestamp": "2017-08-04T11:44:25.097Z", @@ -150,10 +138,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", "kafka.log.component": "controller-event-thread", - "kafka.log.level": "INFO", - "kafka.log.message": "Shutdown completed", + "log.level": 
"INFO", "log.offset": 1656, - "message": "[2017-08-04 11:44:25,097] INFO [controller-event-thread]: Shutdown completed (kafka.controller.ControllerEventManager$ControllerEventThread)" + "message": "Shutdown completed" }, { "@timestamp": "2017-08-04T11:44:25.099Z", @@ -162,10 +149,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "DEBUG", - "kafka.log.message": "Controller resigning, broker id 0", + "log.level": "DEBUG", "log.offset": 1797, - "message": "[2017-08-04 11:44:25,099] DEBUG [Controller 0]: Controller resigning, broker id 0 (kafka.controller.KafkaController)" + "message": "Controller resigning, broker id 0" }, { "@timestamp": "2017-08-04T11:44:25.100Z", @@ -174,10 +160,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", "kafka.log.component": "Controller 0", - "kafka.log.level": "DEBUG", - "kafka.log.message": "De-registering IsrChangeNotificationListener", + "log.level": "DEBUG", "log.offset": 1914, - "message": "[2017-08-04 11:44:25,100] DEBUG [Controller 0]: De-registering IsrChangeNotificationListener (kafka.controller.KafkaController)" + "message": "De-registering IsrChangeNotificationListener" }, { "@timestamp": "2017-08-04T11:44:25.105Z", @@ -186,10 +171,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.PartitionStateMachine", "kafka.log.component": "Partition state machine on Controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Stopped partition state machine", + "log.level": "INFO", "log.offset": 2042, - "message": "[2017-08-04 11:44:25,105] INFO [Partition state machine on Controller 0]: Stopped partition state machine (kafka.controller.PartitionStateMachine)" + "message": "Stopped partition state machine" }, { "@timestamp": "2017-08-04T11:44:25.111Z", @@ -198,10 +182,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.ReplicaStateMachine", "kafka.log.component": "Replica state machine on controller 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Stopped replica state machine", + "log.level": "INFO", "log.offset": 2189, - "message": "[2017-08-04 11:44:25,111] INFO [Replica state machine on controller 0]: Stopped replica state machine (kafka.controller.ReplicaStateMachine)" + "message": "Stopped replica state machine" }, { "@timestamp": "2017-08-04T11:44:25.112Z", @@ -210,10 +193,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", "kafka.log.component": "Controller-0-to-broker-0-send-thread", - "kafka.log.level": "INFO", - "kafka.log.message": "Shutting down", + "log.level": "INFO", "log.offset": 2330, - "message": "[2017-08-04 11:44:25,112] INFO [Controller-0-to-broker-0-send-thread]: Shutting down (kafka.controller.RequestSendThread)" + "message": "Shutting down" }, { "@timestamp": "2017-08-04T11:44:25.112Z", @@ -222,10 +204,9 @@ "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", "kafka.log.component": "Controller-0-to-broker-0-send-thread", - "kafka.log.level": "INFO", - "kafka.log.message": "Stopped", + "log.level": "INFO", "log.offset": 2452, - "message": "[2017-08-04 11:44:25,112] INFO [Controller-0-to-broker-0-send-thread]: Stopped (kafka.controller.RequestSendThread)" + "message": "Stopped" }, { "@timestamp": "2017-08-04T11:44:25.113Z", @@ -234,9 +215,8 @@ "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", "kafka.log.component": "Controller-0-to-broker-0-send-thread", - "kafka.log.level": "INFO", - 
"kafka.log.message": "Shutdown completed", + "log.level": "INFO", "log.offset": 2568, - "message": "[2017-08-04 11:44:25,113] INFO [Controller-0-to-broker-0-send-thread]: Shutdown completed (kafka.controller.RequestSendThread)" + "message": "Shutdown completed" } ] \ No newline at end of file diff --git a/filebeat/module/kafka/log/test/server.log-expected.json b/filebeat/module/kafka/log/test/server.log-expected.json index 973869080f3c..e7de98a23682 100644 --- a/filebeat/module/kafka/log/test/server.log-expected.json +++ b/filebeat/module/kafka/log/test/server.log-expected.json @@ -6,10 +6,9 @@ "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "starting", + "log.level": "INFO", "log.offset": 0, - "message": "[2017-08-04 10:48:20,377] INFO starting (kafka.server.KafkaServer)" + "message": "starting" }, { "@timestamp": "2017-08-04T10:48:20.379Z", @@ -18,10 +17,9 @@ "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Connecting to zookeeper on localhost:2181", + "log.level": "INFO", "log.offset": 67, - "message": "[2017-08-04 10:48:20,379] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)" + "message": "Connecting to zookeeper on localhost:2181" }, { "@timestamp": "2017-08-04T10:48:20.400Z", @@ -30,10 +28,9 @@ "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ZooKeeper", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Client environment:java.io.tmpdir=/tmp", + "log.level": "INFO", "log.offset": 167, - "message": "[2017-08-04 10:48:20,400] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)" + "message": "Client environment:java.io.tmpdir=/tmp" }, { "@timestamp": "2017-08-04T10:48:20.400Z", @@ -42,10 +39,9 @@ "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ZooKeeper", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Client environment:java.compiler=", + "log.level": "INFO", "log.offset": 270, - "message": "[2017-08-04 10:48:20,400] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper)" + "message": "Client environment:java.compiler=" }, { "@timestamp": "2017-08-04T10:48:20.401Z", @@ -54,10 +50,9 @@ "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ZooKeeper", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Initiating client connection, connectString=localhost:2181 sessionTimeout=6000 watcher=org.I0Itec.zkclient.ZkClient@5ffead27", + "log.level": "INFO", "log.offset": 372, - "message": "[2017-08-04 10:48:20,401] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=6000 watcher=org.I0Itec.zkclient.ZkClient@5ffead27 (org.apache.zookeeper.ZooKeeper)" + "message": "Initiating client connection, connectString=localhost:2181 sessionTimeout=6000 watcher=org.I0Itec.zkclient.ZkClient@5ffead27" }, { "@timestamp": "2017-08-04T10:48:20.413Z", @@ -66,10 +61,9 @@ "input.type": "log", "kafka.log.class": "org.I0Itec.zkclient.ZkClient", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Waiting for keeper state SyncConnected", + "log.level": "INFO", "log.offset": 561, - "message": "[2017-08-04 10:48:20,413] INFO Waiting for keeper state SyncConnected (org.I0Itec.zkclient.ZkClient)" + "message": "Waiting for keeper state 
SyncConnected" }, { "@timestamp": "2017-08-04T10:48:20.415Z", @@ -78,10 +72,9 @@ "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ClientCnxn", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error)", + "log.level": "INFO", "log.offset": 662, - "message": "[2017-08-04 10:48:20,415] INFO Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)" + "message": "Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error)" }, { "@timestamp": "2017-08-04T10:48:20.420Z", @@ -90,10 +83,9 @@ "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ClientCnxn", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Socket connection established to localhost/0:0:0:0:0:0:0:1:2181, initiating session", + "log.level": "INFO", "log.offset": 855, - "message": "[2017-08-04 10:48:20,420] INFO Socket connection established to localhost/0:0:0:0:0:0:0:1:2181, initiating session (org.apache.zookeeper.ClientCnxn)" + "message": "Socket connection established to localhost/0:0:0:0:0:0:0:1:2181, initiating session" }, { "@timestamp": "2017-08-04T10:48:20.457Z", @@ -102,10 +94,9 @@ "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ClientCnxn", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x15dabf8d4140000, negotiated timeout = 6000", + "log.level": "INFO", "log.offset": 1004, - "message": "[2017-08-04 10:48:20,457] INFO Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x15dabf8d4140000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)" + "message": "Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x15dabf8d4140000, negotiated timeout = 6000" }, { "@timestamp": "2017-08-04T10:48:20.458Z", @@ -114,10 +105,9 @@ "input.type": "log", "kafka.log.class": "org.I0Itec.zkclient.ZkClient", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "zookeeper state changed (SyncConnected)", + "log.level": "INFO", "log.offset": 1199, - "message": "[2017-08-04 10:48:20,458] INFO zookeeper state changed (SyncConnected) (org.I0Itec.zkclient.ZkClient)" + "message": "zookeeper state changed (SyncConnected)" }, { "@timestamp": "2017-08-04T10:48:20.748Z", @@ -126,10 +116,9 @@ "input.type": "log", "kafka.log.class": "kafka.server.BrokerMetadataCheckpoint", "kafka.log.component": "unknown", - "kafka.log.level": "WARN", - "kafka.log.message": "No meta.properties file under dir /tmp/kafka-logs/meta.properties", + "log.level": "WARN", "log.offset": 1301, - "message": "[2017-08-04 10:48:20,748] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint)" + "message": "No meta.properties file under dir /tmp/kafka-logs/meta.properties" }, { "@timestamp": "2017-08-04T10:48:20.800Z", @@ -138,10 +127,9 @@ "input.type": "log", "kafka.log.class": "kafka.server.ClientQuotaManager$ThrottledRequestReaper", "kafka.log.component": "ThrottledRequestReaper-Fetch", - "kafka.log.level": "INFO", - "kafka.log.message": "Starting", + "log.level": "INFO", "log.offset": 1438, - "message": 
"[2017-08-04 10:48:20,800] INFO [ThrottledRequestReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)" + "message": "Starting" }, { "@timestamp": "2017-08-04T10:48:20.866Z", @@ -150,10 +138,9 @@ "input.type": "log", "kafka.log.class": "kafka.log.LogManager", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Log directory '/tmp/kafka-logs' not found, creating it.", + "log.level": "INFO", "log.offset": 1567, - "message": "[2017-08-04 10:48:20,866] INFO Log directory '/tmp/kafka-logs' not found, creating it. (kafka.log.LogManager)" + "message": "Log directory '/tmp/kafka-logs' not found, creating it." }, { "@timestamp": "2017-08-04T10:48:20.873Z", @@ -162,10 +149,9 @@ "input.type": "log", "kafka.log.class": "kafka.log.LogManager", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Loading logs.", + "log.level": "INFO", "log.offset": 1677, - "message": "[2017-08-04 10:48:20,873] INFO Loading logs. (kafka.log.LogManager)" + "message": "Loading logs." }, { "@timestamp": "2017-08-04T10:48:21.062Z", @@ -174,10 +160,9 @@ "input.type": "log", "kafka.log.class": "kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper", "kafka.log.component": "ExpirationReaper-0-Heartbeat", - "kafka.log.level": "INFO", - "kafka.log.message": "Starting", + "log.level": "INFO", "log.offset": 1745, - "message": "[2017-08-04 10:48:21,062] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)" + "message": "Starting" }, { "@timestamp": "2017-08-04T10:48:21.063Z", @@ -186,10 +171,9 @@ "input.type": "log", "kafka.log.class": "kafka.utils.ZKCheckedEphemeral", "kafka.log.component": "unknown", - "kafka.log.level": "INFO", - "kafka.log.message": "Result of znode creation is: OK", + "log.level": "INFO", "log.offset": 1881, - "message": "[2017-08-04 10:48:21,063] INFO Result of znode creation is: OK (kafka.utils.ZKCheckedEphemeral)" + "message": "Result of znode creation is: OK" }, { "@timestamp": "2017-08-04T10:48:21.095Z", @@ -198,10 +182,9 @@ "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", "kafka.log.component": "Group Metadata Manager on Broker 0", - "kafka.log.level": "INFO", - "kafka.log.message": "Removed 0 expired offsets in 1 milliseconds.", + "log.level": "INFO", "log.offset": 1977, - "message": "[2017-08-04 10:48:21,095] INFO [Group Metadata Manager on Broker 0]: Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)" + "message": "Removed 0 expired offsets in 1 milliseconds." 
     },
     {
         "@timestamp": "2017-08-04T10:48:21.127Z",
@@ -210,10 +193,9 @@
         "input.type": "log",
         "kafka.log.class": "kafka.coordinator.transaction.ProducerIdManager",
         "kafka.log.component": "ProducerId Manager 0",
-        "kafka.log.level": "INFO",
-        "kafka.log.message": "Acquired new producerId block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1",
+        "log.level": "INFO",
         "log.offset": 2138,
-        "message": "[2017-08-04 10:48:21,127] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager)"
+        "message": "Acquired new producerId block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1"
     },
     {
         "@timestamp": "2017-08-04T10:48:21.162Z",
@@ -222,10 +204,9 @@
         "input.type": "log",
         "kafka.log.class": "kafka.coordinator.transaction.TransactionCoordinator",
         "kafka.log.component": "Transaction Coordinator 0",
-        "kafka.log.level": "INFO",
-        "kafka.log.message": "Starting up.",
+        "log.level": "INFO",
         "log.offset": 2369,
-        "message": "[2017-08-04 10:48:21,162] INFO [Transaction Coordinator 0]: Starting up. (kafka.coordinator.transaction.TransactionCoordinator)"
+        "message": "Starting up."
     },
     {
         "@timestamp": "2017-08-04T10:48:21.167Z",
@@ -234,9 +215,8 @@
         "input.type": "log",
         "kafka.log.class": "kafka.coordinator.transaction.TransactionMarkerChannelManager",
         "kafka.log.component": "Transaction Marker Channel Manager 0",
-        "kafka.log.level": "INFO",
-        "kafka.log.message": "Starting",
+        "log.level": "INFO",
         "log.offset": 2497,
-        "message": "[2017-08-04 10:48:21,167] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)"
+        "message": "Starting"
     }
 ]
\ No newline at end of file
diff --git a/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json b/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json
index 3ca5d7215552..95dd48c3ccc0 100644
--- a/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json
+++ b/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json
@@ -6,9 +6,8 @@
         "input.type": "log",
         "kafka.log.class": "state.change.logger",
         "kafka.log.component": "Broker id=30",
-        "kafka.log.level": "TRACE",
-        "kafka.log.message": "Cached leader info PartitionState(controllerEpoch=25, leader=-1, leaderEpoch=15, isr=[10], zkVersion=15, replicas=[10], offlineReplicas=[10]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 20 epoch 25 with correlation id 8",
+        "log.level": "TRACE",
         "log.offset": 0,
-        "message": "[2018-07-16 10:17:06,489] TRACE [Broker id=30] Cached leader info PartitionState(controllerEpoch=25, leader=-1, leaderEpoch=15, isr=[10], zkVersion=15, replicas=[10], offlineReplicas=[10]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 20 epoch 25 with correlation id 8 (state.change.logger)"
+        "message": "Cached leader info PartitionState(controllerEpoch=25, leader=-1, leaderEpoch=15, isr=[10], zkVersion=15, replicas=[10], offlineReplicas=[10]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 20 epoch 25 with correlation id 8"
     }
 ]
\ No newline at end of file
diff --git a/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json b/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json
index dcb7a01e9225..64414b8b0a0c 100644
--- a/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json
+++ b/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json
@@ -6,12 +6,11 @@
         "input.type": "log",
         "kafka.log.class": "state.change.logger",
         "kafka.log.component": "Broker id=20",
-        "kafka.log.level": "TRACE",
-        "kafka.log.message": "Cached leader info PartitionState(controllerEpoch=5, leader=20, leaderEpoch=0, isr=[20], zkVersion=0, replicas=[20], offlineReplicas=[]) for partition foo-0 in response to UpdateMetadata request sent by controller 10 epoch 5 with correlation id 146",
         "log.flags": [
             "multiline"
         ],
+        "log.level": "TRACE",
         "log.offset": 0,
-        "message": "[2018-10-31 15:09:30,451] TRACE [Broker id=20] Cached leader info PartitionState(controllerEpoch=5, leader=20, leaderEpoch=0, isr=[20], zkVersion=0, replicas=[20], offlineReplicas=[]) for partition foo-0 in response to UpdateMetadata request sent by controller 10 epoch 5 with correlation id 146 (state.change.logger)\n"
+        "message": "Cached leader info PartitionState(controllerEpoch=5, leader=20, leaderEpoch=0, isr=[20], zkVersion=0, replicas=[20], offlineReplicas=[]) for partition foo-0 in response to UpdateMetadata request sent by controller 10 epoch 5 with correlation id 146"
     }
 ]
\ No newline at end of file
diff --git a/filebeat/module/kafka/log/test/state-change.log-expected.json b/filebeat/module/kafka/log/test/state-change.log-expected.json
index 9cd3e1667ff7..a8013db2d38e 100644
--- a/filebeat/module/kafka/log/test/state-change.log-expected.json
+++ b/filebeat/module/kafka/log/test/state-change.log-expected.json
@@ -6,9 +6,8 @@
         "input.type": "log",
         "kafka.log.class": "state.change.logger",
         "kafka.log.component": "unknown",
-        "kafka.log.level": "TRACE",
-        "kafka.log.message": "Controller 0 epoch 1 received response {error_code=0} for a request sent to broker baldur:9092 (id: 0 rack: null)",
+        "log.level": "TRACE",
        "log.offset": 0,
-        "message": "[2017-08-04 10:48:21,428] TRACE Controller 0 epoch 1 received response {error_code=0} for a request sent to broker baldur:9092 (id: 0 rack: null) (state.change.logger)"
+        "message": "Controller 0 epoch 1 received response {error_code=0} for a request sent to broker baldur:9092 (id: 0 rack: null)"
     }
 ]
\ No newline at end of file
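
For reviewers unfamiliar with the migration mechanism: the `alias: true` entries in dev-tools/ecs-migration.yml and the `type: alias` definitions in fields.yml are meant to end up as Elasticsearch alias fields in the generated index template, so searches that still use the old `kafka.log.*` names resolve to the new ECS fields. A minimal illustrative sketch of how such an alias behaves (the index name and the WARN value are examples, not taken from this patch):

    PUT filebeat-example/_mapping
    {
      "properties": {
        "log":   { "properties": { "level": { "type": "keyword" } } },
        "kafka": { "properties": { "log": { "properties": {
          "level": { "type": "alias", "path": "log.level" }
        } } } }
      }
    }

    GET filebeat-example/_search
    {
      "query": { "term": { "kafka.log.level": "WARN" } }
    }

The term query matches documents whose log.level is WARN: the alias field stores no data of its own and simply redirects to its path target at query time, which is why the expected test output above only carries log.level and message.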