diff --git a/.chloggen/feat_33227.yaml b/.chloggen/feat_33227.yaml
new file mode 100644
index 000000000000..c4a956723098
--- /dev/null
+++ b/.chloggen/feat_33227.yaml
@@ -0,0 +1,29 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: spanmetricsconnector
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Improve the consistency of the metrics generated by the spanmetricsconnector by adding `traces.span.metrics` as the default namespace.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [33227, 32818]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: "Default namespace for the generated metrics is traces.span.metrics now. |
+ The deprecated metrics are: calls, duration and events. |
+ The feature flag connector.spanmetrics.legacyLatencyMetricNames was added to revert the behavior."
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/connector/spanmetricsconnector/README.md b/connector/spanmetricsconnector/README.md
index fc8af6a22f78..075ce606cbcf 100644
--- a/connector/spanmetricsconnector/README.md
+++ b/connector/spanmetricsconnector/README.md
@@ -111,7 +111,7 @@ The following settings can be optionally configured:
cumulative temporality to avoid memory leaks and correct metric timestamp resets.
- `aggregation_temporality` (default: `AGGREGATION_TEMPORALITY_CUMULATIVE`): Defines the aggregation temporality of the generated metrics.
One of either `AGGREGATION_TEMPORALITY_CUMULATIVE` or `AGGREGATION_TEMPORALITY_DELTA`.
-- `namespace`: Defines the namespace of the generated metrics. If `namespace` provided, generated metric name will be added `namespace.` prefix.
+- `namespace` (default: `traces.span.metrics`): Defines the namespace of the generated metrics. If `namespace` is set to a non-empty value, generated metric names are prefixed with `namespace.`.
- `metrics_flush_interval` (default: `60s`): Defines the flush interval of the generated metrics.
- `metrics_expiration` (default: `0`): Defines the expiration time as `time.Duration`, after which, if no new spans are received, metrics will no longer be exported. Setting to `0` means the metrics will never expire (default behavior).
- `metric_timestamp_cache_size` (default `1000`): Only relevant for delta temporality span metrics. Controls the size of the cache used to keep track of a metric's TimestampUnixNano the last time it was flushed. When a metric is evicted from the cache, its next data point will indicate a "reset" in the series. Downstream components converting from delta to cumulative, like `prometheusexporter`, may handle these resets by setting cumulative counters back to 0.
@@ -122,6 +122,8 @@ The following settings can be optionally configured:
- `dimensions`: (mandatory if `enabled`) the list of the span's event attributes to add as dimensions to the events metric, which will be included _on top of_ the common and configured `dimensions` for span and resource attributes.
- `resource_metrics_key_attributes`: Filter the resource attributes used to produce the resource metrics key map hash. Use this in case changing resource attributes (e.g. process id) are breaking counter metrics.
+
+The feature gate `connector.spanmetrics.legacyMetricNames` (disabled by default) can be enabled to make the connector use the legacy metric names, i.e. names without the default `traces.span.metrics` namespace prefix. The gate only takes effect when `namespace` is left at its default value.
+
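+A minimal configuration sketch (assuming the default namespace) showing how the prefix can be kept or dropped; the feature gate itself is enabled on the collector command line, e.g. `--feature-gates=connector.spanmetrics.legacyMetricNames`:
+
+```yaml
+connectors:
+  spanmetrics:
+    # Default value; produces traces.span.metrics.calls, traces.span.metrics.duration, ...
+    namespace: traces.span.metrics
+    # Alternatively, setting an empty namespace also drops the prefix without the feature gate:
+    # namespace: ""
+```
+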
## Examples
The following is a simple example usage of the `spanmetrics` connector.
diff --git a/connector/spanmetricsconnector/config_test.go b/connector/spanmetricsconnector/config_test.go
index 56f1928338b8..cb2e45ff0a9c 100644
--- a/connector/spanmetricsconnector/config_test.go
+++ b/connector/spanmetricsconnector/config_test.go
@@ -50,6 +50,7 @@ func TestLoadConfig(t *testing.T) {
{Name: "http.method", Default: &defaultMethod},
{Name: "http.status_code", Default: (*string)(nil)},
},
+ Namespace: DefaultNamespace,
DimensionsCacheSize: 1500,
ResourceMetricsCacheSize: 1600,
MetricsFlushInterval: 30 * time.Second,
@@ -70,6 +71,7 @@ func TestLoadConfig(t *testing.T) {
{
id: component.NewIDWithName(metadata.Type, "exponential_histogram"),
expected: &Config{
+ Namespace: DefaultNamespace,
AggregationTemporality: cumulative,
DimensionsCacheSize: defaultDimensionsCacheSize,
ResourceMetricsCacheSize: defaultResourceMetricsCacheSize,
@@ -103,6 +105,7 @@ func TestLoadConfig(t *testing.T) {
MetricsFlushInterval: 60 * time.Second,
Histogram: HistogramConfig{Disable: false, Unit: defaultUnit},
Exemplars: ExemplarsConfig{Enabled: true},
+ Namespace: DefaultNamespace,
},
},
{
@@ -114,6 +117,7 @@ func TestLoadConfig(t *testing.T) {
MetricsFlushInterval: 60 * time.Second,
Histogram: HistogramConfig{Disable: false, Unit: defaultUnit},
Exemplars: ExemplarsConfig{Enabled: true, MaxPerDataPoint: &defaultMaxPerDatapoint},
+ Namespace: DefaultNamespace,
},
},
{
@@ -125,6 +129,7 @@ func TestLoadConfig(t *testing.T) {
ResourceMetricsKeyAttributes: []string{"service.name", "telemetry.sdk.language", "telemetry.sdk.name"},
MetricsFlushInterval: 60 * time.Second,
Histogram: HistogramConfig{Disable: false, Unit: defaultUnit},
+ Namespace: DefaultNamespace,
},
},
{
@@ -136,6 +141,7 @@ func TestLoadConfig(t *testing.T) {
ResourceMetricsCacheSize: defaultResourceMetricsCacheSize,
MetricsFlushInterval: 60 * time.Second,
Histogram: HistogramConfig{Disable: false, Unit: defaultUnit},
+ Namespace: DefaultNamespace,
},
},
{
@@ -146,6 +152,7 @@ func TestLoadConfig(t *testing.T) {
ResourceMetricsCacheSize: defaultResourceMetricsCacheSize,
MetricsFlushInterval: 60 * time.Second,
Histogram: HistogramConfig{Disable: false, Unit: defaultUnit},
+ Namespace: DefaultNamespace,
},
extraAssertions: func(config *Config) {
assert.Equal(t, defaultDeltaTimestampCacheSize, config.GetDeltaTimestampCacheSize())
diff --git a/connector/spanmetricsconnector/connector.go b/connector/spanmetricsconnector/connector.go
index 2cdc354df9d8..c3f8c897aa9e 100644
--- a/connector/spanmetricsconnector/connector.go
+++ b/connector/spanmetricsconnector/connector.go
@@ -295,14 +295,20 @@ func (p *connectorImp) buildMetrics() pmetric.Metrics {
return startTime
}
+ metricsNamespace := p.config.Namespace
+ if legacyMetricNamesFeatureGate.IsEnabled() && metricsNamespace == DefaultNamespace {
+ metricsNamespace = ""
+ }
+
sums := rawMetrics.sums
metric := sm.Metrics().AppendEmpty()
- metric.SetName(buildMetricName(p.config.Namespace, metricNameCalls))
+ metric.SetName(buildMetricName(metricsNamespace, metricNameCalls))
sums.BuildMetrics(metric, startTimeGenerator, timestamp, p.config.GetAggregationTemporality())
+
if !p.config.Histogram.Disable {
histograms := rawMetrics.histograms
metric = sm.Metrics().AppendEmpty()
- metric.SetName(buildMetricName(p.config.Namespace, metricNameDuration))
+ metric.SetName(buildMetricName(metricsNamespace, metricNameDuration))
metric.SetUnit(p.config.Histogram.Unit.String())
histograms.BuildMetrics(metric, startTimeGenerator, timestamp, p.config.GetAggregationTemporality())
}
@@ -310,7 +316,7 @@ func (p *connectorImp) buildMetrics() pmetric.Metrics {
events := rawMetrics.events
if p.events.Enabled {
metric = sm.Metrics().AppendEmpty()
- metric.SetName(buildMetricName(p.config.Namespace, metricNameEvents))
+ metric.SetName(buildMetricName(metricsNamespace, metricNameEvents))
events.BuildMetrics(metric, startTimeGenerator, timestamp, p.config.GetAggregationTemporality())
}
diff --git a/connector/spanmetricsconnector/connector_test.go b/connector/spanmetricsconnector/connector_test.go
index 893a32c2ae31..9c7042e7a6eb 100644
--- a/connector/spanmetricsconnector/connector_test.go
+++ b/connector/spanmetricsconnector/connector_test.go
@@ -18,6 +18,7 @@ import (
"go.opentelemetry.io/collector/connector/connectortest"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/consumertest"
+ "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
@@ -1053,53 +1054,80 @@ func BenchmarkConnectorConsumeTraces(b *testing.B) {
}
func TestExcludeDimensionsConsumeTraces(t *testing.T) {
- excludeDimensions := []string{"span.kind", "span.name", "totallyWrongNameDoesNotAffectAnything"}
- p, err := newConnectorImp(stringp("defaultNullValue"), explicitHistogramsConfig, disabledExemplarsConfig, disabledEventsConfig, cumulative, 0, []string{}, 1000, clockwork.NewFakeClock(), excludeDimensions...)
- require.NoError(t, err)
- traces := buildSampleTrace()
- // Test
- ctx := metadata.NewIncomingContext(context.Background(), nil)
+ testcases := []struct {
+ dsc string
+ featureGateEnabled bool
+ }{
+ {
+ dsc: fmt.Sprintf("%s enabled", legacyMetricNamesFeatureGateID),
+ featureGateEnabled: true,
+ },
+ {
+ dsc: fmt.Sprintf("%s disabled", legacyMetricNamesFeatureGateID),
+ featureGateEnabled: false,
+ },
+ }
- err = p.ConsumeTraces(ctx, traces)
- require.NoError(t, err)
- metrics := p.buildMetrics()
+ excludeDimensions := []string{"span.kind", "span.name", "totallyWrongNameDoesNotAffectAnything"}
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.dsc, func(t *testing.T) {
+ // Set feature gate value
+ previousValue := legacyMetricNamesFeatureGate.IsEnabled()
+ require.NoError(t, featuregate.GlobalRegistry().Set(legacyMetricNamesFeatureGate.ID(), tc.featureGateEnabled))
+ defer func() {
+ require.NoError(t, featuregate.GlobalRegistry().Set(legacyMetricNamesFeatureGate.ID(), previousValue))
+ }()
+
+ p, err := newConnectorImp(stringp("defaultNullValue"), explicitHistogramsConfig, disabledExemplarsConfig, disabledEventsConfig, cumulative, 0, []string{}, 1000, clockwork.NewFakeClock(), excludeDimensions...)
+ require.NoError(t, err)
+ traces := buildSampleTrace()
- for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
- rm := metrics.ResourceMetrics().At(i)
- ism := rm.ScopeMetrics()
- // Checking all metrics, naming notice: ilmC/mC - C here is for Counter.
- for ilmC := 0; ilmC < ism.Len(); ilmC++ {
- m := ism.At(ilmC).Metrics()
- for mC := 0; mC < m.Len(); mC++ {
- metric := m.At(mC)
- // We check only sum and histogram metrics here, because for now only they are present in this module.
+ ctx := metadata.NewIncomingContext(context.Background(), nil)
- switch metric.Type() {
- case pmetric.MetricTypeExponentialHistogram, pmetric.MetricTypeHistogram:
- {
- dp := metric.Histogram().DataPoints()
- for dpi := 0; dpi < dp.Len(); dpi++ {
- for attributeKey := range dp.At(dpi).Attributes().AsRaw() {
- assert.NotContains(t, excludeDimensions, attributeKey)
- }
+ err = p.ConsumeTraces(ctx, traces)
+ require.NoError(t, err)
+ metrics := p.buildMetrics()
- }
- }
- case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeSum, pmetric.MetricTypeSummary:
- {
- dp := metric.Sum().DataPoints()
- for dpi := 0; dpi < dp.Len(); dpi++ {
- for attributeKey := range dp.At(dpi).Attributes().AsRaw() {
- assert.NotContains(t, excludeDimensions, attributeKey)
+ for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+ rm := metrics.ResourceMetrics().At(i)
+ ism := rm.ScopeMetrics()
+ // Checking all metrics, naming notice: ilmC/mC - C here is for Counter.
+ for ilmC := 0; ilmC < ism.Len(); ilmC++ {
+ m := ism.At(ilmC).Metrics()
+ for mC := 0; mC < m.Len(); mC++ {
+ metric := m.At(mC)
+ // We check only sum and histogram metrics here, because for now only they are present in this module.
+
+ switch metric.Type() {
+ case pmetric.MetricTypeExponentialHistogram, pmetric.MetricTypeHistogram:
+ {
+ dp := metric.Histogram().DataPoints()
+ for dpi := 0; dpi < dp.Len(); dpi++ {
+ for attributeKey := range dp.At(dpi).Attributes().AsRaw() {
+ assert.NotContains(t, excludeDimensions, attributeKey)
+ }
+
+ }
+ }
+ case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeSum, pmetric.MetricTypeSummary:
+ {
+ dp := metric.Sum().DataPoints()
+ for dpi := 0; dpi < dp.Len(); dpi++ {
+ for attributeKey := range dp.At(dpi).Attributes().AsRaw() {
+ assert.NotContains(t, excludeDimensions, attributeKey)
+ }
+ }
}
+
}
- }
+ }
}
-
}
- }
+
+ })
}
}
diff --git a/connector/spanmetricsconnector/factory.go b/connector/spanmetricsconnector/factory.go
index b153fde6bb31..4f921ed8fe1c 100644
--- a/connector/spanmetricsconnector/factory.go
+++ b/connector/spanmetricsconnector/factory.go
@@ -13,10 +13,28 @@ import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/connector"
"go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/featuregate"
"github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector/internal/metadata"
)
+const (
+ DefaultNamespace = "traces.span.metrics"
+ legacyMetricNamesFeatureGateID = "connector.spanmetrics.legacyMetricNames"
+)
+
+var legacyMetricNamesFeatureGate *featuregate.Gate
+
+func init() {
+ // TODO: Remove this feature gate when the legacy metric names are removed.
+ legacyMetricNamesFeatureGate = featuregate.GlobalRegistry().MustRegister(
+ legacyMetricNamesFeatureGateID,
+ featuregate.StageAlpha, // Alpha because we want it disabled by default.
+ featuregate.WithRegisterDescription("When enabled, connector uses legacy metric names."),
+ featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33227"),
+ )
+}
+
// NewFactory creates a factory for the spanmetrics connector.
func NewFactory() connector.Factory {
return connector.NewFactory(
@@ -33,6 +51,7 @@ func createDefaultConfig() component.Config {
ResourceMetricsCacheSize: defaultResourceMetricsCacheSize,
MetricsFlushInterval: 60 * time.Second,
Histogram: HistogramConfig{Disable: false, Unit: defaultUnit},
+ Namespace: DefaultNamespace,
}
}
diff --git a/connector/spanmetricsconnector/go.mod b/connector/spanmetricsconnector/go.mod
index ccead95f0970..de2e7bcbb8ed 100644
--- a/connector/spanmetricsconnector/go.mod
+++ b/connector/spanmetricsconnector/go.mod
@@ -15,6 +15,7 @@ require (
go.opentelemetry.io/collector/connector v0.108.2-0.20240829190554-7da6b618a7ee
go.opentelemetry.io/collector/consumer v0.108.2-0.20240829190554-7da6b618a7ee
go.opentelemetry.io/collector/consumer/consumertest v0.108.2-0.20240829190554-7da6b618a7ee
+ go.opentelemetry.io/collector/featuregate v1.14.2-0.20240829190554-7da6b618a7ee
go.opentelemetry.io/collector/pdata v1.14.2-0.20240829190554-7da6b618a7ee
go.opentelemetry.io/collector/semconv v0.108.2-0.20240829190554-7da6b618a7ee
go.uber.org/goleak v1.3.0
@@ -31,6 +32,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/uuid v1.6.0 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
diff --git a/connector/spanmetricsconnector/go.sum b/connector/spanmetricsconnector/go.sum
index 5fc56db8e2d3..0109ab036b88 100644
--- a/connector/spanmetricsconnector/go.sum
+++ b/connector/spanmetricsconnector/go.sum
@@ -19,6 +19,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -92,6 +94,8 @@ go.opentelemetry.io/collector/consumer/consumerprofiles v0.108.2-0.2024082919055
go.opentelemetry.io/collector/consumer/consumerprofiles v0.108.2-0.20240829190554-7da6b618a7ee/go.mod h1:F6Shxg3TqoDZe2+p2PkVRUsnJMqATQxbs4c1nfaJrAc=
go.opentelemetry.io/collector/consumer/consumertest v0.108.2-0.20240829190554-7da6b618a7ee h1:258eNHF/i76AAD7ETHoKT5osyCe+UdDVWIgVyNyMH2c=
go.opentelemetry.io/collector/consumer/consumertest v0.108.2-0.20240829190554-7da6b618a7ee/go.mod h1:tRqOtUukG76iBlPTAUwFSU87dDO+x33Gyn2x9mecfko=
+go.opentelemetry.io/collector/featuregate v1.14.2-0.20240829190554-7da6b618a7ee h1:kOnDlEC1EDsymDjAZe8LZhwbVqnWPtPwSlUXDYwUmws=
+go.opentelemetry.io/collector/featuregate v1.14.2-0.20240829190554-7da6b618a7ee/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
go.opentelemetry.io/collector/pdata v1.14.2-0.20240829190554-7da6b618a7ee h1:vqnRTZckt4i+bGcR+d/LAOAe4nAAs/KkN2L2CTXvW0Q=
go.opentelemetry.io/collector/pdata v1.14.2-0.20240829190554-7da6b618a7ee/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8=
go.opentelemetry.io/collector/pdata/pprofile v0.108.2-0.20240829190554-7da6b618a7ee h1:ImK86bzHqdYHH1jNXKJLFld3stY9LuqroUvTAq3+Pws=
diff --git a/exporter/lokiexporter/README.md b/exporter/lokiexporter/README.md
index 6fe9d3aa5966..917cf86b90c1 100644
--- a/exporter/lokiexporter/README.md
+++ b/exporter/lokiexporter/README.md
@@ -15,9 +15,204 @@ Exports data via HTTP to [Loki](https://grafana.com/docs/loki/latest/).
## Deprecation notice
-This component is **deprecated**: Loki now supports native [OTLP ingestion](https://grafana.com/docs/loki/latest/send-data/otel/) starting from v3. Grafana Cloud also supports [OTLP native ingestion](https://grafana.com/docs/grafana-cloud/send-data/otlp/send-data-otlp) for logs. This component will be removed in September 2024.
+This component is **deprecated**: Loki now supports native [OTLP ingestion](https://grafana.com/docs/loki/latest/send-data/otel/) starting from v3. Grafana Cloud also supports [OTLP native ingestion](https://grafana.com/docs/grafana-cloud/send-data/otlp/send-data-otlp) for logs. This component will be removed in November 2024.
-## Getting Started
+
+### Benefits of the new Loki OpenTelemetry log format
+
+The new format for OpenTelemetry logs introduced in Loki V3 brings the following benefits:
+
+* Native support for the structure of OpenTelemetry logs, enabling simpler querying (no more JSON parsing).
+* Simplified client configuration to send OpenTelemetry data using the standard OTLP protocol.
+
+### Loki log message format changes for OpenTelemetry logs
+
+See the OpenTelemetry Logs Data Model specification [here](https://opentelemetry.io/docs/specs/otel/logs/data-model/).
+
+| OpenTelemetry log field | Pre Loki V3 | Loki V3 through the Loki OTLP Endpoint |
+| ----- | ----- | ----- |
+| [`Timestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-timestamp) | `timestamp` | `timestamp` |
+| [`ObservedTimestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-observedtimestamp) | Not available | `metadata[observed_timestamp]` |
+| [`TraceId`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-traceid) | `traceid` field of the Loki JSON log message | `metadata[trace_id]` |
+| [`SpanId`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-spanid) | `spanid` field of the Loki JSON log message | `metadata[span_id]` |
+| [`TraceFlags`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-traceflags) | Not available | `metadata[flags]` |
+| [`SeverityText`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext) | `severity` field of the JSON log message (e.g. `Information`) and `level` label (e.g. `ERROR`, `INFO`...), the `detected_level` label is also available | `metadata[severity_text]`, the `detected_level` label is also available |
+| [`SeverityNumber`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber) | Not available | `metadata[severity_number]` |
+| [`Body`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body) | `body` field of the Loki JSON log message | The Loki log message; referenced as `__line__` in LogQL functions (e.g. `line_format`) |
+| [`InstrumentationScope`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-instrumentationscope) | `instrumentation_scope_name` field of the JSON log message | `metadata[scope_name]` |
+| [`Attributes`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes) | JSON fields of the Loki log message | `metadata[xyz]`, where `xyz` is the OTel attribute name with `.` replaced by `_` (e.g. the `thread.name` OpenTelemetry attribute becomes the `thread_name` Loki metadata key) |
+| [`Resource`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-resource) | `service.name`, `service.namespace`, and `service.instance.id` are promoted as the following labels: `job=[${service.namespace}/]${service.name}`, `instance=${service.instance.id}`, `exporter="OTLP"`. Other resource attributes are stored as JSON fields of the Loki log message with the prefix `resources_` (e.g. `resources_k8s_namespace_name`) | Default list of resource attributes promoted as Loki labels: `cloud.availability_zone`, `cloud.region`, `container.name`, `deployment.environment`, `k8s.cluster.name`, `k8s.container.name`, `k8s.cronjob.name`, `k8s.daemonset.name`, `k8s.deployment.name`, `k8s.job.name`, `k8s.namespace.name`, `k8s.pod.name`, `k8s.replicaset.name`, `k8s.statefulset.name`, `service.instance.id`, `service.name`, `service.namespace`. Other resource attributes are by default promoted as Loki message metadata. ℹ️ The list of promoted resource attributes is configurable using Loki's distributor config parameter `default_resource_attributes_as_index_labels` when using self-managed Loki ([here](https://grafana.com/docs/loki/latest/configure/#distributor)) or by opening a support request when using Grafana Cloud |
+
+ℹ️ Additional conversion rules from OpenTelemetry Logs to Loki
+
+* All special characters, including `.` in attribute and resource attribute names, are converted into `_` when they are mapped as Loki labels or metadata.
+* OTel attribute values with complex data types (i.e. arrays and nested structures) are converted into JSON strings.
+
+### Migration instructions
+
+#### Instrumentation migration
+
+No changes are needed in the instrumentation layer. OpenTelemetry log sources such as the OpenTelemetry SDKs or the [OpenTelemetry Collector File Log Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver) don't have to be modified.
+
+#### Logs collection migration
+
+Replace the OpenTelemetry Collector Loki Exporter with the [OpenTelemetry Collector OTLP HTTP Exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter), as OpenTelemetry logs should now be exported as-is to the Loki OTLP endpoint.
+
+Example OpenTelemetry Collector configuration migration:
+
+```yaml
+# ========================
+# = BEFORE MIGRATION     =
+# ========================
+
+extensions:
+ basicauth/loki:
+ client_auth:
+ username: <>
+ password: <>
+
+exporters:
+ loki:
+ auth:
+ authenticator: basicauth/loki
+ endpoint: https://loki.example.com:3100/loki/api/v1/push
+
+service:
+ extensions: [basicauth/loki]
+ pipelines:
+ logs:
+ receivers: [...]
+ processors: [...]
+ exporters: [loki, ...]
+
+
+# ========================
+# = AFTER MIGRATION      =
+# ========================
+
+extensions:
+ basicauth/loki:
+ client_auth:
+ username: <>
+ password: <>
+
+exporters:
+ otlphttp/loki:
+ auth:
+ authenticator: basicauth/loki
+ endpoint: http://loki.example.com:3100/otlp/v1/logs
+
+service:
+ extensions: [basicauth/loki]
+ pipelines:
+ logs:
+ receivers: [...]
+ processors: [...]
+ exporters: [otlphttp/loki, ...]
+```
+
+* When using Grafana Cloud, the [Grafana Cloud OTLP endpoint](https://grafana.com/docs/grafana-cloud/send-data/otlp/send-data-otlp/) should be used instead of the Loki OTLP endpoint. The connection details of the Grafana Cloud OTLP endpoint (OTLP HTTP URL and credentials) are available from the Grafana Cloud "OpenTelemetry Collector" connection tile.
+* The promotion of OpenTelemetry attributes and resource attributes to Loki labels using the `loki.attribute.labels` and `loki.resource.labels` hints is replaced by the list of promoted attributes managed centrally in Loki.
+* The default list of resource attributes promoted as labels (see above) should be sufficient for most use cases.
+* ℹ️ Changes can be made to this list using the Loki distributor configuration parameter `default_resource_attributes_as_index_labels` ([here](https://grafana.com/docs/loki/latest/configure/#distributor)) for self-managed instances (see the sketch below), or by opening a support ticket for Grafana Cloud.
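+
+A hedged sketch of what this could look like for a self-managed Loki instance; the exact placement of `default_resource_attributes_as_index_labels` under the `distributor` block is an assumption to verify against the linked configuration reference:
+
+```yaml
+# Loki configuration (self-managed); assumed key placement, verify against the distributor docs
+distributor:
+  otlp_config:
+    # Resource attributes promoted to Loki index labels instead of structured metadata
+    default_resource_attributes_as_index_labels:
+      - service.name
+      - service.namespace
+      - service.instance.id
+      - deployment.environment
+      - k8s.namespace.name
+      - k8s.pod.name
+```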
+
+#### LogQL queries migration
+
+##### From `job` and `instance` to `service_name`, `service_namespace`, and `service_instance_id`
+
+The Loki labels `job` and `instance` are no longer generated and are replaced by the `service_name`, `service_namespace`, and `service_instance_id` labels.
+
+Example:
+
+```
+BEFORE
+{job="ecommerce/frontend", instance="instance-1234567890"}
+
+AFTER
+{service_name="frontend", service_namespace="ecommerce", service_instance_id="instance-1234567890"}
+```
+
+##### From `| json | an_attribute=...` to `{an_attribute=...}` or `| an_attribute=...`
+
+OTel log attributes, resource attributes, and fields are no longer stored in the JSON message but are stored as:
+* Loki message labels for promoted resource attributes (see list above),
+* Loki message metadata for other resource attributes, log attributes, and log fields.
+
+LogQL statements `| json | an_attribute=...` must be converted to:
+
+* For promoted resource attributes: `{an_attribute=...}`
+* For other resource attributes, log attributes, and log fields: `| an_attribute=...`
+
+Example:
+
+```
+BEFORE
+{exporter="OTLP", job="frontend"} | json | resources_deployment_environment="production"
+
+AFTER
+{service_name="frontend"} | deployment_environment="production"
+```
+
+##### From `| json | traceid=...` and `| json | spanid=...` to `| trace_id=...` and `| span_id=...`
+
+The log fields `TraceId` and `SpanId` were stored as the JSON fields `traceid` and `spanid`; they are now stored as `metadata[trace_id]` and `metadata[span_id]`, so LogQL queries must be changed accordingly.
+
+`TraceId` filters like `| json | traceid=<> ...` and `|= <> ...` must be converted to `| trace_id=<> ...`, where `<>` is the trace ID you are searching for.
+Similarly, `SpanId` filters like `| json | spanid=<> ...` and `|= <> ...` must be converted to `| span_id=<> ...`.
+
+Example:
+
+```
+BEFORE
+{exporter="OTLP", job="/frontend"} |= "00960a472ea5b87954ca07902d66f914"
+
+AFTER
+{service_name="frontend"} | trace_id="00960a472ea5b87954ca07902d66f914"
+```
+
+##### From `line_format {{.body}}` to `line_format {{__line__}}`
+
+The `{{.body}}` element of the JSON payload that used to hold the OTel log message body is now the message of the Loki log line and should be referenced as `{{__line__}}` in `line_format` calls.
+
+Example:
+
+```
+BEFORE
+{exporter="OTLP", job="frontend"} | json | line_format `[{{.level}}] {{.body}}`
+
+AFTER
+{service_name="frontend"} | line_format `[{{.detected_level}}] {{__line__}}`
+```
+
+#### Grafana visualizations migration
+
+##### Tempo data source: Trace to Logs
+
+To enable the "trace to logs" navigation from Tempo to Loki, navigate to the Grafana Tempo data source configuration screen, in the "Trace to logs" section, select "use custom query" and specify the query:
+
+```
+{service_name="${__span.tags["service.name"]}"} | trace_id="${__span.traceId}"
+```
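+
+For provisioned Grafana instances, a hedged sketch of how this custom query might be wired into the Tempo data source via data source provisioning; the `tracesToLogsV2` keys follow Grafana's provisioning format and should be verified against your Grafana version (URLs and UIDs below are placeholders):
+
+```yaml
+apiVersion: 1
+datasources:
+  - name: Tempo
+    type: tempo
+    uid: tempo
+    url: http://tempo:3200   # placeholder URL
+    jsonData:
+      tracesToLogsV2:
+        datasourceUid: loki   # UID of the Loki data source
+        customQuery: true
+        query: '{service_name="${__span.tags["service.name"]}"} | trace_id="${__span.traceId}"'
+```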
+
+##### Loki data source: Log to Trace
+
+To enable the "logs to trace" navigation from Loki to Tempo, navigate to the Grafana Loki data source configuration screen, in the "Derived fields" section, update or create a derived field with:
+* Name: `Trace ID`
+* Type: `Label` (this `Label` name is misleading because it also supports Loki message metadata)
+* Label: `trace_id`
+* Internal link: activated
+* Select the Tempo data source on which "trace to logs" is configured
+
+
+### See Also
+
+* [Loki documentation / Ingesting OpenTelemetry logs](https://grafana.com/docs/loki/latest/send-data/otel/)
+
+## Getting Started with the deprecated OpenTelemetry Collector Loki Exporter
The following settings are required: