diff --git a/charts/feature-cluster-events/README.md b/charts/feature-cluster-events/README.md
index 79d016833..b4c61c8fc 100644
--- a/charts/feature-cluster-events/README.md
+++ b/charts/feature-cluster-events/README.md
@@ -44,7 +44,9 @@ Be sure perform actual integration testing in a live environment in the main [k8
 
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| extraProcessingStages | string | `""` | Stage blocks to be added to the loki.process component for cluster events. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. |
+| extraLogProcessingStages | string | `""` | Stage blocks to be added to the loki.process component for cluster events. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. |
+| labelsToKeep | list | `["job","level","namespace","node","source"]` | The list of labels to keep on the logs, all other pipeline labels will be dropped. |
+| structuredMetadata | object | `{}` | The structured metadata mappings to set. To not set any structured metadata, set this to an empty object (e.g. `{}`) Format: `: `. Example: structuredMetadata: component: component kind: kind name: name |
 
 ### General settings
 
diff --git a/charts/feature-cluster-events/templates/_module.alloy.tpl b/charts/feature-cluster-events/templates/_module.alloy.tpl
index 540f2e6d9..9efd40c0e 100644
--- a/charts/feature-cluster-events/templates/_module.alloy.tpl
+++ b/charts/feature-cluster-events/templates/_module.alloy.tpl
@@ -10,13 +10,96 @@ declare "cluster_events" {
 {{- if .Values.namespaces }}
     namespaces = {{ .Values.namespaces | toJson }}
 {{- end }}
-{{- if .Values.extraProcessingStages }}
     forward_to = [loki.process.cluster_events.receiver]
   }
 
   loki.process "cluster_events" {
-{{ .Values.extraProcessingStages | indent 4 }}
-{{- end }}
+
+    // add a static source label to the logs so they can be differentiated / restricted if necessary
+    stage.static_labels {
+      values = {
+        "source" = "kubernetes-events",
+      }
+    }
+
+    // extract some of the fields from the log line, these could be used as labels, structured metadata, etc.
+    {{- if eq .Values.logFormat "json" }}
+    stage.json {
+      expressions = {
+        "component" = "sourcecomponent", // map the sourcecomponent field to component
+        "kind" = "",
+        "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc.
+        "name" = "",
+        "node" = "sourcehost", // map the sourcehost field to node
+      }
+    }
+    {{- else }}
+    stage.logfmt {
+      mapping = {
+        "component" = "sourcecomponent", // map the sourcecomponent field to component
+        "kind" = "",
+        "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc.
+        "name" = "",
+        "node" = "sourcehost", // map the sourcehost field to node
+      }
+    }
+    {{- end }}
+    // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped
+    // prior to being written to Loki, but this makes them available
+    stage.labels {
+      values = {
+        "component" = "",
+        "kind" = "",
+        "level" = "",
+        "name" = "",
+        "node" = "",
+      }
+    }
+
+    // if kind=Node, set the node label by copying the instance label
+    stage.match {
+      selector = "{kind=\"Node\"}"
+
+      stage.labels {
+        values = {
+          "node" = "name",
+        }
+      }
+    }
+
+    // set the level extracted key value as a normalized log level
+    stage.match {
+      selector = "{level=\"Normal\"}"
+
+      stage.static_labels {
+        values = {
+          level = "Info",
+        }
+      }
+    }
+
+    {{- if .Values.extraLogProcessingStages }}
+    {{ tpl .Values.extraLogProcessingStages $ | indent 4 }}
+    {{ end }}
+
+    {{- /* the stage.structured_metadata block needs to be conditional because support for structured metadata can be disabled */ -}}
+    {{- /* through the Loki limits_config on a per-tenant basis; if the stage emits values while structured metadata is disabled */ -}}
+    {{- /* in Loki, the write will fail. */ -}}
+    {{- if gt (len (keys .Values.structuredMetadata)) 0 }}
+    // set the structured metadata values
+    stage.structured_metadata {
+      values = {
+        {{- range $key, $value := .Values.structuredMetadata }}
+        {{ $key | quote }} = {{ if $value }}{{ $value | quote }}{{ else }}{{ $key | quote }}{{ end }},
+        {{- end }}
+      }
+    }
+    {{- end }}
+
+    // Only keep the labels that are defined in the `keepLabels` list.
+    stage.label_keep {
+      values = {{ .Values.labelsToKeep | toJson }}
+    }
     forward_to = argument.logs_destinations.value
   }
 }
diff --git a/charts/feature-cluster-events/tests/default_test.yaml b/charts/feature-cluster-events/tests/default_test.yaml
index b1aa3d6c9..07478870a 100644
--- a/charts/feature-cluster-events/tests/default_test.yaml
+++ b/charts/feature-cluster-events/tests/default_test.yaml
@@ -16,10 +16,70 @@ tests:
               argument "logs_destinations" {
                 comment = "Must be a list of log destinations where collected logs should be forwarded to"
               }
-
+
              loki.source.kubernetes_events "cluster_events" {
                job_name = "integrations/kubernetes/eventhandler"
                log_format = "logfmt"
+               forward_to = [loki.process.cluster_events.receiver]
+             }
+
+             loki.process "cluster_events" {
+
+               // add a static source label to the logs so they can be differentiated / restricted if necessary
+               stage.static_labels {
+                 values = {
+                   "source" = "kubernetes-events",
+                 }
+               }
+
+               // extract some of the fields from the log line, these could be used as labels, structured metadata, etc.
+               stage.logfmt {
+                 mapping = {
+                   "component" = "sourcecomponent", // map the sourcecomponent field to component
+                   "kind" = "",
+                   "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc.
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml b/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml index d67ba6c8b..953097cd1 100644 --- a/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml +++ b/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml @@ -3,10 +3,10 @@ suite: Test extra processing stages templates: - configmap.yaml tests: - - it: should create a ConfigMap + - it: should create a ConfigMap with extra processing stages set: deployAsConfigMap: true - extraProcessingStages: |- + extraLogProcessingStages: |- stage.drop { source = "namespace" value = "private" @@ -21,7 +21,7 @@ tests: argument "logs_destinations" { comment = "Must be a list of log destinations where collected logs should be forwarded to" } - + loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" @@ -29,10 +29,67 @@ tests: } loki.process "cluster_events" { - stage.drop { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + stage.drop { source = "namespace" value = "private" } + + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/feature-cluster-events/tests/labels_test.yaml b/charts/feature-cluster-events/tests/labels_test.yaml new file mode 100644 index 000000000..e216d4d56 --- /dev/null +++ b/charts/feature-cluster-events/tests/labels_test.yaml @@ -0,0 +1,92 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test namespaces +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap that sets custom labels to keep + set: + deployAsConfigMap: true + labelsToKeep: + - job + - namespace + - level + - node + - name + - source + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "cluster_events" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.source.kubernetes_events "cluster_events" { + job_name = "integrations/kubernetes/eventhandler" + log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","namespace","level","node","name","source"] + } + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-cluster-events/tests/namespace_test.yaml b/charts/feature-cluster-events/tests/namespace_test.yaml index 7b426ddb8..ba4e78a7c 100644 --- a/charts/feature-cluster-events/tests/namespace_test.yaml +++ b/charts/feature-cluster-events/tests/namespace_test.yaml @@ -3,7 +3,7 @@ suite: Test namespaces templates: - configmap.yaml tests: - - it: should create a ConfigMap + - it: should create a ConfigMap that restricts events to the given namespaces set: deployAsConfigMap: true namespaces: ["a", "b"] @@ -17,11 +17,71 @@ tests: argument "logs_destinations" { comment = "Must be a list of log destinations where collected logs should be forwarded to" } - + loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" namespaces = ["a","b"] + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/feature-cluster-events/tests/structured_metadata_test.yaml b/charts/feature-cluster-events/tests/structured_metadata_test.yaml new file mode 100644 index 000000000..f9b1d2a35 --- /dev/null +++ b/charts/feature-cluster-events/tests/structured_metadata_test.yaml @@ -0,0 +1,97 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test namespaces +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap that sets structured metadata k/v pairs + set: + deployAsConfigMap: true + structuredMetadata: + kind: kind + component: component + name: name + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "cluster_events" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.source.kubernetes_events "cluster_events" { + job_name = "integrations/kubernetes/eventhandler" + log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + // set the structured metadata values + stage.structured_metadata { + values = { + "component" = "component", + "kind" = "kind", + "name" = "name", + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-cluster-events/values.schema.json b/charts/feature-cluster-events/values.schema.json index 37c56b68e..80c955900 100644 --- a/charts/feature-cluster-events/values.schema.json +++ b/charts/feature-cluster-events/values.schema.json @@ -5,12 +5,18 @@ "deployAsConfigMap": { "type": "boolean" }, - "extraProcessingStages": { + "extraLogProcessingStages": { "type": "string" }, "fullnameOverride": { "type": "string" }, + "labelsToKeep": { + "type": "array", + "items": { + "type": "string" + } + }, "logFormat": { "type": "string" }, @@ -19,6 +25,9 @@ }, "namespaces": { "type": "array" + }, + "structuredMetadata": { + "type": "object" } } } diff --git a/charts/feature-cluster-events/values.yaml b/charts/feature-cluster-events/values.yaml index bc513a3b2..c2a95fbbc 100644 --- a/charts/feature-cluster-events/values.yaml +++ b/charts/feature-cluster-events/values.yaml @@ -19,7 +19,27 @@ logFormat: logfmt # ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) # This value is templated so that you can refer to other values from this file. # @section -- Processing settings -extraProcessingStages: "" +extraLogProcessingStages: "" + +# -- The list of labels to keep on the logs, all other pipeline labels will be dropped. +# @section -- Processing settings +labelsToKeep: + - job + - level + - namespace + - node + - source + +# -- The structured metadata mappings to set. +# To not set any structured metadata, set this to an empty object (e.g. `{}`) +# Format: `: `. +# Example: +# structuredMetadata: +# component: component +# kind: kind +# name: name +# @section -- Processing settings +structuredMetadata: {} # @ignore deployAsConfigMap: false diff --git a/charts/feature-node-logs/README.md b/charts/feature-node-logs/README.md index 19856da93..b22a0fc9c 100644 --- a/charts/feature-node-logs/README.md +++ b/charts/feature-node-logs/README.md @@ -33,6 +33,18 @@ Be sure perform actual integration testing in a live environment in the main [k8 ## Values +### Journal Logs + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| extraDiscoveryRules | string | `""` | Rule blocks to be added used with the loki.source.journal component for journal logs. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. | +| extraLogProcessingStages | string | `""` | Stage blocks to be added to the loki.process component for journal logs. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. | +| journal.formatAsJson | bool | `false` | Whether to forward the original journal entry as JSON. | +| journal.jobLabel | string | `"integrations/kubernetes/journal"` | The value for the job label for journal logs. | +| journal.maxAge | string | `"8h"` | The path to the journal logs on the worker node. 
| +| journal.path | string | `"/var/log/journal"` | The path to the journal logs on the worker node. | +| journal.units | list | `[]` | The list of systemd units to keep scraped logs from. If empty, all units are scraped. | + ### General settings | Key | Type | Default | Description | @@ -40,14 +52,9 @@ Be sure perform actual integration testing in a live environment in the main [k8 | fullnameOverride | string | `""` | Full name override | | nameOverride | string | `""` | Name override | -### Journal Logs +### Processing settings | Key | Type | Default | Description | |-----|------|---------|-------------| -| journal.extraDiscoveryRules | string | `""` | Rule blocks to be added used with the loki.source.journal component for journal logs. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. | -| journal.extraLogProcessingBlocks | string | `""` | Stage blocks to be added to the loki.process component for journal logs. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. | -| journal.formatAsJson | bool | `false` | Whether to forward the original journal entry as JSON. | -| journal.jobLabel | string | `"integrations/kubernetes/journal"` | The value for the job label for journal logs. | -| journal.maxAge | string | `"8h"` | The path to the journal logs on the worker node. | -| journal.path | string | `"/var/log/journal"` | The path to the journal logs on the worker node. | -| journal.units | list | `[]` | The list of systemd units to keep scraped logs from. If empty, all units are scraped. | +| labelsToKeep | list | `["instance","level","name","unit","service_name"]` | The list of labels to keep on the logs, all other pipeline labels will be dropped. | +| structuredMetadata | object | `{}` | The structured metadata mappings to set. To not set any structured metadata, set this to an empty object (e.g. `{}`) Format: `: `. 
Example: structuredMetadata: detected_level: level | diff --git a/charts/feature-node-logs/templates/_module.alloy.tpl b/charts/feature-node-logs/templates/_module.alloy.tpl index fe780316c..a8870437d 100644 --- a/charts/feature-node-logs/templates/_module.alloy.tpl +++ b/charts/feature-node-logs/templates/_module.alloy.tpl @@ -12,14 +12,57 @@ declare "node_logs" { regex = "{{ join "|" .Values.journal.units }}" } {{- end }} + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + rule { action = "replace" source_labels = ["__journal__systemd_unit"] replacement = "$1" target_label = "unit" } - {{- if .Values.journal.extraDiscoveryRules }} - {{ .Values.journal.extraDiscoveryRules | indent 2 }} + + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + {{- if .Values.extraDiscoveryRules }} + {{ .Values.extraDiscoveryRules | indent 2 }} {{- end }} forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component @@ -38,9 +81,100 @@ declare "node_logs" { } loki.process "journal_logs" { - {{- if .Values.journal.extraLogProcessingBlocks }} - {{ tpl .Values.journal.extraLogProcessingBlocks . | indent 2 }} - {{ end }} + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. 
I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + {{- if .Values.extraLogProcessingStages }} + {{ tpl .Values.extraLogProcessingStages . | indent 4 }} + {{ end }} + + {{- /* the stage.structured_metadata block needs to be conditionalized because the support for enabling structured metadata can be disabled */ -}} + {{- /* through the loki limits_conifg on a per-tenant basis, even if there are no values defined or there are values defined but it is disabled */ -}} + {{- /* in Loki, the write will fail. */ -}} + {{- if gt (len (keys .Values.structuredMetadata)) 0 }} + // set the structured metadata values + stage.structured_metadata { + values = { + {{- range $key, $value := .Values.structuredMetadata }} + {{ $key | quote }} = {{ if $value }}{{ $value | quote }}{{ else }}{{ $key | quote }}{{ end }}, + {{- end }} + } + } + {{- end }} + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = {{ .Values.labelsToKeep | toJson }} + } + forward_to = argument.logs_destinations.value } } diff --git a/charts/feature-node-logs/tests/default_test.yaml b/charts/feature-node-logs/tests/default_test.yaml index d43dead03..058a2258e 100644 --- a/charts/feature-node-logs/tests/default_test.yaml +++ b/charts/feature-node-logs/tests/default_test.yaml @@ -16,18 +16,61 @@ tests: argument "logs_destinations" { comment = "Must be a list of log destinations where collected logs should be forwarded to" } - + loki.relabel "journal" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + rule { action = "replace" source_labels = ["__journal__systemd_unit"] replacement = "$1" target_label = "unit" } - + + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component } - + loki.source.journal "worker" { path = "/var/log/journal" format_as_json = false @@ -39,8 +82,84 @@ tests: } forward_to = [loki.process.journal_logs.receiver] } - + loki.process "journal_logs" { + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. 
I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["instance","level","name","unit","service_name"] + } + forward_to = argument.logs_destinations.value } } diff --git a/charts/feature-node-logs/tests/filter_units_test.yaml b/charts/feature-node-logs/tests/filter_units_test.yaml new file mode 100644 index 000000000..27b10c3d8 --- /dev/null +++ b/charts/feature-node-logs/tests/filter_units_test.yaml @@ -0,0 +1,176 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test Filtering Units +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap that only includes certain units + set: + deployAsConfigMap: true + journal: + units: + - kubelet.service + - docker.service + - containerd.service + - node-problem-detector.service + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + rule { + action = "keep" + source_labels = ["__journal__systemd_unit"] + regex = "kubelet.service|docker.service|containerd.service|node-problem-detector.service" + } + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: 
https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["instance","level","name","unit","service_name"] + } + + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-node-logs/tests/labels_test.yaml b/charts/feature-node-logs/tests/labels_test.yaml new file mode 100644 index 000000000..dd112c72a --- /dev/null +++ b/charts/feature-node-logs/tests/labels_test.yaml @@ -0,0 +1,170 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test labels +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap that sets custom labels to keep + set: + deployAsConfigMap: true + labelsToKeep: + - exe + - hostname + - level + - unit + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. 
I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["exe","hostname","level","unit"] + } + + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-node-logs/tests/structured_metadata_test.yaml b/charts/feature-node-logs/tests/structured_metadata_test.yaml new file mode 100644 index 000000000..e4e46ea3b --- /dev/null +++ b/charts/feature-node-logs/tests/structured_metadata_test.yaml @@ -0,0 +1,181 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test Structured Metadata +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap that sets structured metadata k/v pairs + set: + deployAsConfigMap: true + structuredMetadata: + cmdline: cmdline + comm: comm + exe: exe + pid: pid + unit: unit + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + 
target_label = "unit" + } + + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + // set the structured metadata values + stage.structured_metadata { + values = { + "cmdline" = "cmdline", + "comm" = "comm", + "exe" = "exe", + "pid" = "pid", + "unit" = "unit", + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["instance","level","name","unit","service_name"] + } + + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-node-logs/values.schema.json b/charts/feature-node-logs/values.schema.json index 770bd6d9a..c99107cdb 100644 --- a/charts/feature-node-logs/values.schema.json +++ b/charts/feature-node-logs/values.schema.json @@ -5,18 +5,18 @@ "deployAsConfigMap": { "type": "boolean" }, + "extraDiscoveryRules": { + "type": "string" + }, + "extraLogProcessingStages": { + "type": "string" + }, "fullnameOverride": { "type": "string" }, "journal": { "type": "object", "properties": { - "extraDiscoveryRules": { - "type": "string" - }, - "extraLogProcessingBlocks": { - "type": "string" - }, "formatAsJson": { "type": "boolean" }, @@ -34,8 +34,17 @@ } } }, + "labelsToKeep": { + "type": "array", + "items": { + "type": "string" + } + }, "nameOverride": { "type": "string" + }, + "structuredMetadata": { + "type": "object" } } } diff --git a/charts/feature-node-logs/values.yaml b/charts/feature-node-logs/values.yaml index 6229ee2bc..525cf149c 100644 --- a/charts/feature-node-logs/values.yaml +++ b/charts/feature-node-logs/values.yaml @@ -30,21 +30,40 @@ journal: # - kubelet.service # - docker.service # - containerd.service + # - node-problem-detector.service - # -- Rule blocks to be added used with the loki.source.journal component for journal logs. - # These relabeling rules are applied pre-scrape against the targets from service discovery. - # Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. - # ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) - # **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would - # be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. - # @section -- Journal Logs - extraDiscoveryRules: "" +# -- Rule blocks to be added used with the loki.source.journal component for journal logs. +# These relabeling rules are applied pre-scrape against the targets from service discovery. +# Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. +# ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) +# **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would +# be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. +# @section -- Journal Logs +extraDiscoveryRules: "" - # -- Stage blocks to be added to the loki.process component for journal logs. - # ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) - # This value is templated so that you can refer to other values from this file. - # @section -- Journal Logs - extraLogProcessingBlocks: "" +# -- Stage blocks to be added to the loki.process component for journal logs. +# ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) +# This value is templated so that you can refer to other values from this file. +# @section -- Journal Logs +extraLogProcessingStages: "" + +# -- The list of labels to keep on the logs, all other pipeline labels will be dropped. 
+# @section -- Processing settings +labelsToKeep: + - instance + - level + - name + - unit + - service_name + +# -- The structured metadata mappings to set. +# To not set any structured metadata, set this to an empty object (e.g. `{}`) +# Format: `: `. +# Example: +# structuredMetadata: +# detected_level: level +# @section -- Processing settings +structuredMetadata: {} # @ignore deployAsConfigMap: false diff --git a/charts/k8s-monitoring/Chart.lock b/charts/k8s-monitoring/Chart.lock index 093f56928..b19cb585d 100644 --- a/charts/k8s-monitoring/Chart.lock +++ b/charts/k8s-monitoring/Chart.lock @@ -45,4 +45,4 @@ dependencies: repository: https://grafana.github.io/helm-charts version: 0.10.1 digest: sha256:e2eab36a3b814d7ec09942c3fe46a5c76d606f7c3022628905c41fba7a47b405 -generated: "2024-12-18T22:07:32.727406-05:00" +generated: "2024-12-19T11:30:12.505646-05:00" diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz index bb7b1b990..d627f6de7 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz index 08fd96747..e9107cc30 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz index 35b2e3166..36291a7e4 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz index 6fdc3d4fb..c59ad0eb6 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz index cb14020de..1bbffd51e 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz index bc1fd6216..33e5e4e96 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz index cfb685a0f..a31c75ac6 100644 Binary files 
a/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz index 9d612ba09..a322de6e0 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz index 174690a59..a1b1df839 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz index 69bdc2761..f54842205 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy index cc59fabd7..924d6b8fd 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy @@ -89,30 +89,62 @@ declare "cluster_events" { } loki.process "cluster_events" { - stage.logfmt { - payload = "" - } - - stage.json { - source = "payload" - expressions = { - sku = "id", - count = "", + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", } } - - stage.static { - values = { - site = "lab2", + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node } } - + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available stage.labels { values = { - sku = "", - count = "", + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", } } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml b/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml index 8776bd377..05b5ec2e0 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml +++ b/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml @@ -504,30 +504,62 @@ data: } loki.process "cluster_events" { - stage.logfmt { - payload = "" - } - - stage.json { - source = "payload" - expressions = { - sku = "id", - count = "", + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", } } - - stage.static { - values = { - site = "lab2", + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node } } - + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available stage.labels { values = { - sku = "", - count = "", + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", } } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-events/default/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-events/default/alloy-singleton.alloy index 75435bbac..6852a232b 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-events/default/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/features/cluster-events/default/alloy-singleton.alloy @@ -25,6 +25,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. 
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml index a71f14bd2..4a6038b8f 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml @@ -50,6 +50,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy index 6f2009c76..3ad62ba14 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy @@ -73,6 +73,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml index 39a672530..8ca8fc83b 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml @@ -627,6 +627,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. 
+ stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy b/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy index 19bc87fd9..ec49a0637 100644 --- a/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy +++ b/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy @@ -23,6 +23,40 @@ declare "node_logs" { } loki.relabel "journal" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + rule { action = "replace" source_labels = ["__journal__systemd_unit"] @@ -30,6 +64,15 @@ declare "node_logs" { target_label = "unit" } + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. 
+ rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component } @@ -46,6 +89,82 @@ declare "node_logs" { } loki.process "journal_logs" { + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["instance","level","name","unit","service_name"] + } + forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml index c890fe879..50a44fb7f 100644 --- a/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml @@ -48,6 +48,40 @@ data: } loki.relabel "journal" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "journal", + } + } + + // copy all journal labels and make the available to the pipeline stages as labels, there is a label + // keep defined to filter out unwanted labels, these pipeline labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + rule { action = "replace" source_labels = ["__journal__systemd_unit"] @@ -55,6 +89,15 @@ data: target_label = "unit" } + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component } @@ -71,6 +114,82 @@ data: } loki.process "journal_logs" { + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // default level to unknown + stage.static_labels { + values = { + level = "unknown", + } + } + + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. 
I0119 + stage.regex { + expression = "((?P[A-Z])[0-9])" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(I)" + replace = "INFO" + } + + // if the extracted level is W set WARN + stage.replace { + source = "level" + expression = "(W)" + replace = "WARN" + } + + // if the extracted level is E set ERROR + stage.replace { + source = "level" + expression = "(E)" + replace = "ERROR" + } + + // if the extracted level is I set INFO + stage.replace { + source = "level" + expression = "(D)" + replace = "DEBUG" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // if the level is still unknown, do one last attempt at detecting it based on common levels + stage.match { + selector = "{level=\"unknown\"}" + + // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\]) + stage.regex { + expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])" + } + + // set the extracted level to be a label + stage.labels { + values = { + level = "", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["instance","level","name","unit","service_name"] + } + forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy index d244a43f8..7bc1263e3 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy @@ -73,6 +73,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. 
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml index af331231b..4a6750287 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml @@ -479,6 +479,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy index ddd144811..da9f1318f 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy @@ -73,6 +73,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml index 947cdc789..c020190ef 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml @@ -427,6 +427,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. 
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy index e0067d9a0..d54fe2616 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy @@ -73,6 +73,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml index 2188480c9..cf647085e 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml @@ -427,6 +427,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy index 63a619bf7..453b9df21 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy @@ -73,6 +73,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. 
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml index 3c94b430e..7820f26fe 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml @@ -533,6 +533,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy index 5e48e69a9..f5768d2dd 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy @@ -74,6 +74,66 @@ declare "cluster_events" { loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/docs/examples/proxies/output.yaml b/charts/k8s-monitoring/docs/examples/proxies/output.yaml index 90b29d6c1..041fe94a9 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/output.yaml +++ b/charts/k8s-monitoring/docs/examples/proxies/output.yaml @@ -513,6 +513,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. 
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/tests/cluster_events_test.yaml b/charts/k8s-monitoring/tests/cluster_events_test.yaml index 532370c06..a235c169a 100644 --- a/charts/k8s-monitoring/tests/cluster_events_test.yaml +++ b/charts/k8s-monitoring/tests/cluster_events_test.yaml @@ -61,7 +61,7 @@ tests: otelcol.exporter.loki "loki" { forward_to = [loki.write.loki.receiver] } - + loki.write "loki" { endpoint { url = "http://loki.loki.svc:3100/api/push" @@ -74,16 +74,76 @@ tests: "k8s_cluster_name" = "ci-test-cluster", } } - + // Feature: Cluster Events declare "cluster_events" { argument "logs_destinations" { comment = "Must be a list of log destinations where collected logs should be forwarded to" } - + loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml index 1a5e77c4e..f79495cc4 100644 --- a/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml @@ -641,6 +641,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml index 81ad42ae3..afac29908 100644 --- a/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml @@ -616,6 +616,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. 
+ "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml b/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml index 1e5e4eb6a..10d3f519e 100644 --- a/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml @@ -829,6 +829,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. 
+ stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } } diff --git a/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml b/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml index e05f60f58..6e876647f 100644 --- a/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml @@ -591,6 +591,66 @@ data: loki.source.kubernetes_events "cluster_events" { job_name = "integrations/kubernetes/eventhandler" log_format = "logfmt" + forward_to = [loki.process.cluster_events.receiver] + } + + loki.process "cluster_events" { + + // add a static source label to the logs so they can be differentiated / restricted if necessary + stage.static_labels { + values = { + "source" = "kubernetes-events", + } + } + + // extract some of the fields from the log line, these could be used as labels, structured metadata, etc. + stage.logfmt { + mapping = { + "component" = "sourcecomponent", // map the sourcecomponent field to component + "kind" = "", + "level" = "type", // most events don't have a level but they do have a "type" i.e. Normal, Warning, Error, etc. + "name" = "", + "node" = "sourcehost", // map the sourcehost field to node + } + } + // set these values as labels, they may or may not be used as index labels in Loki as they can be dropped + // prior to being written to Loki, but this makes them available + stage.labels { + values = { + "component" = "", + "kind" = "", + "level" = "", + "name" = "", + "node" = "", + } + } + + // if kind=Node, set the node label by copying the instance label + stage.match { + selector = "{kind=\"Node\"}" + + stage.labels { + values = { + "node" = "name", + } + } + } + + // set the level extracted key value as a normalized log level + stage.match { + selector = "{level=\"Normal\"}" + + stage.static_labels { + values = { + level = "Info", + } + } + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["job","level","namespace","node","source"] + } forward_to = argument.logs_destinations.value } }
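
The rendered `loki.process "cluster_events"` pipelines above are generated entirely from the feature chart's values. As an illustrative sketch only (the `clusterEvents` key, the `enabled` flag, and the contents of the extra stage below are assumptions for illustration, not taken from this diff), a user could control which labels survive, promote extracted fields to structured metadata, and append their own processing stages roughly like this:

```yaml
# Hypothetical values.yaml overrides for the cluster-events feature (illustrative sketch;
# verify key names against the chart version you are deploying).
clusterEvents:
  enabled: true
  # Only these pipeline labels are kept; everything else is dropped by the final stage.label_keep.
  labelsToKeep:
    - job
    - level
    - namespace
    - node
    - source
  # Promote extracted fields to structured metadata instead of index labels.
  # Leave this empty ({}) when the Loki tenant does not have structured metadata enabled,
  # since the write would otherwise fail.
  structuredMetadata:
    component: component
    kind: kind
    name: name
  # Extra stage blocks added to the loki.process component; this value is templated,
  # so other values from the file can be referenced.
  extraLogProcessingStages: |
    stage.drop {
      expression = ".*SuccessfulCreate.*"
    }
```

Because `stage.label_keep` is the last stage in the rendered pipelines shown above, any label introduced by custom stages would also need to be listed in `labelsToKeep` (or mapped via `structuredMetadata`) to survive the write to Loki, assuming the extra stages render before that final stage.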