diff --git a/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml b/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml b/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml b/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..d0eabeb096
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,289 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ smartagent/postgresql:
+ config:
+ connectionString: sslmode=disable user={{.username}} password={{.password}}
+ params:
+ password: password
+ username: postgres
+ port: 5432
+ type: postgresql
+ rule: type == "port" && port == 5432
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml b/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/add-receiver-creator/rendered_manifests/daemonset.yaml b/examples/add-receiver-creator/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..f65abb5ab5
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: 807eebdc4a0d6774eae94a0f0b7d2e95525661265184ac9b5bb62cd05bc45584
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml b/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml b/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml b/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/add-sampler/rendered_manifests/clusterRole.yaml b/examples/add-sampler/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml b/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/add-sampler/rendered_manifests/configmap-agent.yaml b/examples/add-sampler/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..cd5ae8a951
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,284 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ probabilistic_sampler:
+ hash_seed: 22
+ sampling_percentage: 15.3
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - probabilistic_sampler
+ - k8sattributes
+ - batch
+ - resource
+ - resourcedetection
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml b/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/add-sampler/rendered_manifests/daemonset.yaml b/examples/add-sampler/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..dc8bfd62d1
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: fb7b212cc914fce0178fb811a13ec32be64b73489d87cef5249462f9b9d29e2b
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml b/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/add-sampler/rendered_manifests/secret-splunk.yaml b/examples/add-sampler/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/add-sampler/rendered_manifests/serviceAccount.yaml b/examples/add-sampler/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/add-sampler/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/collector-agent-only/rendered_manifests/clusterRole.yaml b/examples/collector-agent-only/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/collector-agent-only/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml b/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..8f9144180f
--- /dev/null
+++ b/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,280 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/collector-agent-only/rendered_manifests/daemonset.yaml b/examples/collector-agent-only/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..7766ddb26a
--- /dev/null
+++ b/examples/collector-agent-only/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: f24285909af0884c7557482977a7a54aa1294e3a121a5cf78d7572a19fc5bafd
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml b/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml b/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..1d67706519
--- /dev/null
+++ b/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,144 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ splunk_hec/o11y:
+ disable_compression: true
+ endpoint: https://ingest.CHANGEME.signalfx.com/v1/log
+ log_data_enabled: true
+ profiling_data_enabled: false
+ token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ transform/add_sourcetype:
+ log_statements:
+ - context: log
+ statements:
+ - set(resource.attributes["com.splunk.sourcetype"], Concat(["kube:object:",
+ attributes["k8s.resource.name"]], ""))
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ k8sobjects:
+ auth_type: serviceAccount
+ objects:
+ - field_selector: status.phase=Running
+ interval: 15m
+ label_selector: environment in (production),tier in (frontend)
+ mode: pull
+ name: pods
+ - group: events.k8s.io
+ mode: watch
+ name: events
+ namespaces:
+ - default
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ logs/objects:
+ exporters:
+ - splunk_hec/o11y
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ - transform/add_sourcetype
+ receivers:
+ - k8sobjects
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f9e0b1db5a
--- /dev/null
+++ b/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: c4743fac640a0ee010545732c3252ea72f78ec092f66f614314e541eb8687a3d
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/clusterRole.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/configmap-gateway.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/configmap-gateway.yaml
new file mode 100644
index 0000000000..13e4f9729f
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/configmap-gateway.yaml
@@ -0,0 +1,200 @@
+---
+# Source: splunk-otel-collector/templates/configmap-gateway.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ sending_queue:
+ num_consumers: 32
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sending_queue:
+ num_consumers: 32
+ extensions:
+ health_check: null
+ http_forwarder:
+ egress:
+ endpoint: https://api.CHANGEME.signalfx.com
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource/add_cluster_name:
+ attributes:
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/collector:
+ config:
+ scrape_configs:
+ - job_name: otel-collector
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ signalfx:
+ access_token_passthrough: true
+ endpoint: 0.0.0.0:9943
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ - zpages
+ - http_forwarder
+ pipelines:
+ logs/signalfx-events:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ receivers:
+ - signalfx
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_cluster_name
+ receivers:
+ - otlp
+ - signalfx
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource/add_cluster_name
+ receivers:
+ - prometheus/collector
+ traces:
+ exporters:
+ - sapm
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resource/add_cluster_name
+ receivers:
+ - otlp
+ - jaeger
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/deployment-gateway.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/deployment-gateway.yaml
new file mode 100644
index 0000000000..9205bfe6b4
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/deployment-gateway.yaml
@@ -0,0 +1,121 @@
+---
+# Source: splunk-otel-collector/templates/deployment-gateway.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-collector
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
+ annotations:
+ checksum/config: 0a71c96ab49070efbab9792ac6bd2a7fd6c79456ffbd8742ee2dda17103c2ae6
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "0"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ ports:
+ - name: http-forwarder
+ containerPort: 6060
+ protocol: TCP
+ - name: jaeger-grpc
+ containerPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+        resources:
+          limits:
+            cpu: 2
+            memory: 4Gi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-collector
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/secret-splunk.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/service.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/service.yaml
new file mode 100644
index 0000000000..4d41becb65
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/service.yaml
@@ -0,0 +1,57 @@
+---
+# Source: splunk-otel-collector/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-collector
+spec:
+ type: ClusterIP
+ ports:
+ - name: http-forwarder
+ port: 6060
+ targetPort: http-forwarder
+ protocol: TCP
+ - name: jaeger-grpc
+ port: 14250
+ targetPort: jaeger-grpc
+ protocol: TCP
+ - name: jaeger-thrift
+ port: 14268
+ targetPort: jaeger-thrift
+ protocol: TCP
+ - name: otlp
+ port: 4317
+ targetPort: otlp
+ protocol: TCP
+ - name: otlp-http
+ port: 4318
+ targetPort: otlp-http
+ protocol: TCP
+ - name: otlp-http-old
+ port: 55681
+ targetPort: otlp-http-old
+ protocol: TCP
+ - name: signalfx
+ port: 9943
+ targetPort: signalfx
+ protocol: TCP
+ - name: zipkin
+ port: 9411
+ targetPort: zipkin
+ protocol: TCP
+ selector:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
diff --git a/examples/collector-gateway-only-advanced/rendered_manifests/serviceAccount.yaml b/examples/collector-gateway-only-advanced/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/collector-gateway-only-advanced/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml b/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml b/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml
new file mode 100644
index 0000000000..13e4f9729f
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml
@@ -0,0 +1,200 @@
+---
+# Source: splunk-otel-collector/templates/configmap-gateway.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ sending_queue:
+ num_consumers: 32
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sending_queue:
+ num_consumers: 32
+ extensions:
+ health_check: null
+ http_forwarder:
+ egress:
+ endpoint: https://api.CHANGEME.signalfx.com
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource/add_cluster_name:
+ attributes:
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/collector:
+ config:
+ scrape_configs:
+ - job_name: otel-collector
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ signalfx:
+ access_token_passthrough: true
+ endpoint: 0.0.0.0:9943
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ - zpages
+ - http_forwarder
+ pipelines:
+ logs/signalfx-events:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ receivers:
+ - signalfx
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_cluster_name
+ receivers:
+ - otlp
+ - signalfx
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource/add_cluster_name
+ receivers:
+ - prometheus/collector
+ traces:
+ exporters:
+ - sapm
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resource/add_cluster_name
+ receivers:
+ - otlp
+ - jaeger
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml b/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml
new file mode 100644
index 0000000000..e28c604bcb
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml
@@ -0,0 +1,121 @@
+---
+# Source: splunk-otel-collector/templates/deployment-gateway.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-collector
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
+ annotations:
+ checksum/config: 0a71c96ab49070efbab9792ac6bd2a7fd6c79456ffbd8742ee2dda17103c2ae6
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "8192"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ ports:
+ - name: http-forwarder
+ containerPort: 6060
+ protocol: TCP
+ - name: jaeger-grpc
+ containerPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 4
+ memory: 8Gi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-collector
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml b/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/collector-gateway-only/rendered_manifests/service.yaml b/examples/collector-gateway-only/rendered_manifests/service.yaml
new file mode 100644
index 0000000000..4d41becb65
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/service.yaml
@@ -0,0 +1,57 @@
+---
+# Source: splunk-otel-collector/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-collector
+spec:
+ type: ClusterIP
+ ports:
+ - name: http-forwarder
+ port: 6060
+ targetPort: http-forwarder
+ protocol: TCP
+ - name: jaeger-grpc
+ port: 14250
+ targetPort: jaeger-grpc
+ protocol: TCP
+ - name: jaeger-thrift
+ port: 14268
+ targetPort: jaeger-thrift
+ protocol: TCP
+ - name: otlp
+ port: 4317
+ targetPort: otlp
+ protocol: TCP
+ - name: otlp-http
+ port: 4318
+ targetPort: otlp-http
+ protocol: TCP
+ - name: otlp-http-old
+ port: 55681
+ targetPort: otlp-http-old
+ protocol: TCP
+ - name: signalfx
+ port: 9943
+ targetPort: signalfx
+ protocol: TCP
+ - name: zipkin
+ port: 9411
+ targetPort: zipkin
+ protocol: TCP
+ selector:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
diff --git a/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml b/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/crio-logging/rendered_manifests/clusterRole.yaml b/examples/crio-logging/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml b/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/crio-logging/rendered_manifests/configmap-agent.yaml b/examples/crio-logging/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..8f9144180f
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,280 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml b/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/crio-logging/rendered_manifests/daemonset.yaml b/examples/crio-logging/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..7766ddb26a
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: f24285909af0884c7557482977a7a54aa1294e3a121a5cf78d7572a19fc5bafd
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml b/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/crio-logging/rendered_manifests/secret-splunk.yaml b/examples/crio-logging/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/crio-logging/rendered_manifests/serviceAccount.yaml b/examples/crio-logging/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/crio-logging/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-aks/rendered_manifests/clusterRole.yaml b/examples/distribution-aks/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-aks/rendered_manifests/configmap-agent.yaml b/examples/distribution-aks/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..7903923796
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,242 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - aks
+ - azure
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers: null
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..d668c58ef5
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,110 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - aks
+ - azure
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-aks/rendered_manifests/daemonset.yaml b/examples/distribution-aks/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..f00e874b6a
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: d1fd269e6b17d6b8512868f27e54053bc88559066e638d0f15b4e4f273d8ebd1
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..77bec0acf9
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 921dc0fdb922c7db0d923f8bd0716f97b4609c76b11b740043f3d305d4d8112c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-aks/rendered_manifests/secret-splunk.yaml b/examples/distribution-aks/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-aks/rendered_manifests/serviceAccount.yaml b/examples/distribution-aks/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-aks/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml b/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..c88d571b05
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,89 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - patch
diff --git a/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml
new file mode 100644
index 0000000000..4dc6a78278
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml
@@ -0,0 +1,62 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver-node-discoverer-script.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-cr-node-discoverer-script
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ script: |
+ #! /usr/bin/bash
+ set -ex
+
+ echo "Downloading yq"
+ curl -L -o yq https://github.com/mikefarah/yq/releases/download/v4.16.2/yq_linux_amd64
+ ACTUAL=$(sha256sum yq | awk '{print $1}')
+ if [ "${ACTUAL}" != "5c911c4da418ae64af5527b7ee36e77effb85de20c2ce732ed14c7f72743084d" ]; then
+ echo "will not attempt to use yq with unexpected sha256 (${ACTUAL} != 5c911c4da418ae64af5527b7ee36e77effb85de20c2ce732ed14c7f72743084d)"
+ exit 1
+ fi
+ chmod a+x yq
+
+ # If we are the first pod (cluster receiver), set the kubelet stats node filter to only follow labelled nodes.
+ # This node label will be set by the second pod.
+ if [[ "${K8S_POD_NAME}" == *-0 ]]; then
+ echo "will configure kubelet stats receiver to follow other StatefulSet replica's node, as well as use cluster receiver."
+ ./yq e '.receivers.receiver_creator.receivers.kubeletstats.rule = .receivers.receiver_creator.receivers.kubeletstats.rule + " && labels[\"splunk-otel-eks-fargate-kubeletstats-receiver-node\"] == \"true\""' /conf/relay.yaml >/splunk-messages/config.yaml
+ ./yq e -i '.extensions.k8s_observer.observe_pods = false' /splunk-messages/config.yaml
+ exit 0
+ fi
+
+ # Else we are the second pod (wide kubelet stats) label our node to be monitored by the first pod and disable the k8s_cluster receiver.
+ # Update our config to not monitor ourselves
+ echo "Labelling our fargate node to denote it hosts the cluster receiver"
+
+ # download kubectl (verifying checksum)
+ curl -o kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.20.4/2021-04-12/bin/linux/amd64/kubectl
+ ACTUAL=$(sha256sum kubectl | awk '{print $1}')
+ if [ "${ACTUAL}" != "e84ff8c607b2a10f635c312403f9ede40a045404957e55adcf3d663f9e32c630" ]; then
+ echo "will not attempt to use kubectl with unexpected sha256 (${ACTUAL} != e84ff8c607b2a10f635c312403f9ede40a045404957e55adcf3d663f9e32c630)"
+ exit 1
+ fi
+ chmod a+x kubectl
+ # label node
+ ./kubectl label nodes $K8S_NODE_NAME splunk-otel-eks-fargate-kubeletstats-receiver-node=true
+
+ echo "Disabling k8s_cluster receiver for this instance"
+ # strip k8s_cluster and its pipeline
+ ./yq e 'del(.service.pipelines.metrics)' /conf/relay.yaml >/splunk-messages/config.yaml
+ ./yq e -i 'del(.receivers.k8s_cluster)' /splunk-messages/config.yaml
+
+ # set kubelet stats to not monitor ourselves (all other kubelets)
+ echo "Ensuring k8s_observer-based kubeletstats receivers won't monitor own node to avoid Fargate network limitation."
+ ./yq e -i '.receivers.receiver_creator.receivers.kubeletstats.rule = .receivers.receiver_creator.receivers.kubeletstats.rule + " && not ( name contains \"${K8S_NODE_NAME}\" )"' /splunk-messages/config.yaml
diff --git a/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..694c35f82e
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,140 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ observe_nodes: true
+ observe_pods: true
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - eks
+ - ec2
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ kubeletstats:
+ config:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: '`endpoint`:`kubelet_endpoint_port`'
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ rule: type == "k8s.node" && name contains "fargate"
+ watch_observers:
+ - k8s_observer
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ - k8s_observer
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ metrics/eks:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ receivers:
+ - receiver_creator
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f25a28fe05
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,140 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 2
+ serviceName: default-splunk-otel-collector-k8s-cluster-receiver
+ podManagementPolicy: Parallel
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 3d3f127636ca0e5fc74df736ee6a5a09fe58d1fdea833b73ecfd7b7264858122
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: component
+ operator: In
+ values:
+ - otel-k8s-cluster-receiver
+ topologyKey: kubernetes.io/hostname
+ initContainers:
+ - name: cluster-receiver-node-discoverer
+ image: public.ecr.aws/amazonlinux/amazonlinux:latest
+ imagePullPolicy: IfNotPresent
+ command: ["bash", "-c", "/splunk-scripts/init-eks-fargate-cluster-receiver.sh"]
+ env:
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumeMounts:
+ - name: init-eks-fargate-cluster-receiver-script
+ mountPath: /splunk-scripts
+ - name: messages
+ mountPath: /splunk-messages
+ - mountPath: /conf
+ name: collector-configmap
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/splunk-messages/config.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ - mountPath: /splunk-messages
+ name: messages
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
+ - name: init-eks-fargate-cluster-receiver-script
+ configMap:
+ name: default-splunk-otel-collector-cr-node-discoverer-script
+ items:
+ - key: script
+ path: init-eks-fargate-cluster-receiver.sh
+ mode: 0555
+ - name: messages
+ emptyDir: {}
diff --git a/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml b/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-eks-fargate/rendered_manifests/service-cluster-receiver-stateful-set.yaml b/examples/distribution-eks-fargate/rendered_manifests/service-cluster-receiver-stateful-set.yaml
new file mode 100644
index 0000000000..7b4caba456
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/service-cluster-receiver-stateful-set.yaml
@@ -0,0 +1,13 @@
+---
+# Source: splunk-otel-collector/templates/service-cluster-receiver-stateful-set.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app: splunk-otel-collector
+spec:
+ clusterIP: None
+ selector:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
diff --git a/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml b/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-eks/rendered_manifests/clusterRole.yaml b/examples/distribution-eks/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-eks/rendered_manifests/configmap-agent.yaml b/examples/distribution-eks/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..dec517c3c4
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,242 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - eks
+ - ec2
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers: null
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..457075b930
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,110 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - eks
+ - ec2
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-eks/rendered_manifests/daemonset.yaml b/examples/distribution-eks/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..16a6ae2a5a
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: 1efc378d2d2ed6fb8abb84d3e308e27b921f750e04c9649e8084aa6f2412a6f8
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..d333f831d8
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: cc8e0ec167fd5c4f92b6650c413ad6dc90cf4c0fad2cf88dedc45666a6e796ce
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-eks/rendered_manifests/secret-splunk.yaml b/examples/distribution-eks/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-eks/rendered_manifests/serviceAccount.yaml b/examples/distribution-eks/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-eks/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml b/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml b/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..b738495d19
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,242 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - gke
+ - gce
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: none
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10255
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers: null
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..fcb917502f
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,110 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - gke
+ - gce
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml b/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..e8ff019a6b
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml
@@ -0,0 +1,194 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: 2d85ffa1cf2e4750b3e39183d515f1ab969eff9d7bd0800704488c56dc66a278
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ - --metrics-addr=0.0.0.0:8889
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..217dd33a8e
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 0992f278bded8f2a0b7f16476122884a779c1fa4fb468d95cc78d2db826a3c8f
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml b/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml b/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-gke/rendered_manifests/clusterRole.yaml b/examples/distribution-gke/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-gke/rendered_manifests/configmap-agent.yaml b/examples/distribution-gke/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..9a315b89c3
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,242 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - gke
+ - gce
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers: null
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..fcb917502f
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,110 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - gke
+ - gce
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-gke/rendered_manifests/daemonset.yaml b/examples/distribution-gke/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..5ce52c75b4
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: 5df6ee669d36b2d8c9040a9d928406cd60663246479f52373b5d5f72416980aa
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..217dd33a8e
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 0992f278bded8f2a0b7f16476122884a779c1fa4fb468d95cc78d2db826a3c8f
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-gke/rendered_manifests/secret-splunk.yaml b/examples/distribution-gke/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-gke/rendered_manifests/serviceAccount.yaml b/examples/distribution-gke/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-gke/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-openshift/rendered_manifests/clusterRole.yaml b/examples/distribution-openshift/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..7207bb4f94
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,91 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - quota.openshift.io
+ resources:
+ - clusterresourcequotas
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml b/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..06601870d1
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,286 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9154
+ skipVerify: true
+ type: coredns
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && namespace == "openshift-dns" && name contains "dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["app"] == "kube-controller-manager" && labels["kube-controller-manager"]
+ == "true"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 6443 && pod.labels["app"] == "openshift-kube-apiserver"
+ && pod.labels["apiserver"] == "true"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 29101
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["app"] == "sdn"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["app"] == "openshift-kube-scheduler" && labels["scheduler"]
+ == "true"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..308d4976c4
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,109 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ distribution: openshift
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-openshift/rendered_manifests/daemonset.yaml b/examples/distribution-openshift/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..78bbab1d54
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: cbd318d73ff14d3e055da8fdd43f8a992621242e6f5a9dc378ac967c840a8bf8
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..56cca4a110
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: a718608b6795ba91603a1b3d3be137a8cd6364aa79f58161c72d1150ccedc49c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml b/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml b/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml
new file mode 100644
index 0000000000..4639933a04
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml
@@ -0,0 +1,50 @@
+---
+# Source: splunk-otel-collector/templates/securityContextConstraints.yaml
+kind: SecurityContextConstraints
+apiVersion: security.openshift.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+users:
+- system:serviceaccount:default:default-splunk-otel-collector
+allowHostDirVolumePlugin: true
+allowHostIPC: false
+allowHostNetwork: true
+allowHostPID: true
+allowHostPorts: true
+allowPrivilegedContainer: false
+allowedCapabilities: []
+allowedFlexVolumes: []
+defaultAddCapabilities: []
+fsGroup:
+ type: MustRunAs
+priority: 10
+readOnlyRootFilesystem: true
+requiredDropCapabilities:
+- ALL
+runAsUser:
+ type: RunAsAny
+seLinuxContext:
+ seLinuxOptions:
+ level: s0
+ role: system_r
+ type: spc_t
+ user: system_u
+ type: MustRunAs
+supplementalGroups:
+ type: RunAsAny
+volumes:
+- configMap
+- downwardAPI
+- emptyDir
+- hostPath
+- secret
diff --git a/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml b/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/distribution-vanilla/rendered_manifests/clusterRole.yaml b/examples/distribution-vanilla/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/distribution-vanilla/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-vanilla/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/distribution-vanilla/rendered_manifests/configmap-agent.yaml b/examples/distribution-vanilla/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..8f9144180f
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,280 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-vanilla/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-vanilla/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/distribution-vanilla/rendered_manifests/daemonset.yaml b/examples/distribution-vanilla/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..7766ddb26a
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: f24285909af0884c7557482977a7a54aa1294e3a121a5cf78d7572a19fc5bafd
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-vanilla/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-vanilla/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/distribution-vanilla/rendered_manifests/secret-splunk.yaml b/examples/distribution-vanilla/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/distribution-vanilla/rendered_manifests/serviceAccount.yaml b/examples/distribution-vanilla/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/distribution-vanilla/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/enable-network-explorer/rendered_manifests/clusterRole.yaml b/examples/enable-network-explorer/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/enable-network-explorer/rendered_manifests/clusterRoleBinding.yaml b/examples/enable-network-explorer/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/enable-network-explorer/rendered_manifests/configmap-cluster-receiver.yaml b/examples/enable-network-explorer/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/enable-network-explorer/rendered_manifests/configmap-gateway.yaml b/examples/enable-network-explorer/rendered_manifests/configmap-gateway.yaml
new file mode 100644
index 0000000000..13e4f9729f
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/configmap-gateway.yaml
@@ -0,0 +1,200 @@
+---
+# Source: splunk-otel-collector/templates/configmap-gateway.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ sending_queue:
+ num_consumers: 32
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sending_queue:
+ num_consumers: 32
+ extensions:
+ health_check: null
+ http_forwarder:
+ egress:
+ endpoint: https://api.CHANGEME.signalfx.com
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource/add_cluster_name:
+ attributes:
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/collector:
+ config:
+ scrape_configs:
+ - job_name: otel-collector
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ signalfx:
+ access_token_passthrough: true
+ endpoint: 0.0.0.0:9943
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ - zpages
+ - http_forwarder
+ pipelines:
+ logs/signalfx-events:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ receivers:
+ - signalfx
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_cluster_name
+ receivers:
+ - otlp
+ - signalfx
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource/add_cluster_name
+ receivers:
+ - prometheus/collector
+ traces:
+ exporters:
+ - sapm
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resource/add_cluster_name
+ receivers:
+ - otlp
+ - jaeger
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/enable-network-explorer/rendered_manifests/deployment-cluster-receiver.yaml b/examples/enable-network-explorer/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/enable-network-explorer/rendered_manifests/deployment-gateway.yaml b/examples/enable-network-explorer/rendered_manifests/deployment-gateway.yaml
new file mode 100644
index 0000000000..e28c604bcb
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/deployment-gateway.yaml
@@ -0,0 +1,121 @@
+---
+# Source: splunk-otel-collector/templates/deployment-gateway.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-collector
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
+ annotations:
+ checksum/config: 0a71c96ab49070efbab9792ac6bd2a7fd6c79456ffbd8742ee2dda17103c2ae6
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "8192"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ ports:
+ - name: http-forwarder
+ containerPort: 6060
+ protocol: TCP
+ - name: jaeger-grpc
+ containerPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 4
+ memory: 8Gi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-collector
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/configmap.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/configmap.yaml
new file mode 100644
index 0000000000..ec1f1bcedc
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/configmap.yaml
@@ -0,0 +1,11 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-config
+ namespace: "default"
+data:
+ config.yaml: |
+ labels:
+ environment: CHANGEME
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-clusterrole.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-clusterrole.yaml
new file mode 100644
index 0000000000..9dc2d6100b
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-clusterrole.yaml
@@ -0,0 +1,28 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/k8s-collector-clusterrole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: default-splunk-otel-collector-k8s-collector
+ chart: splunk-otel-collector-0.70.0
+ heritage: Helm
+ release: default
+ name: default-splunk-otel-collector-k8s-collector
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-clusterrolebinding.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-clusterrolebinding.yaml
new file mode 100644
index 0000000000..01ef62056c
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/k8s-collector-clusterrolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app: default-splunk-otel-collector-k8s-collector
+ chart: splunk-otel-collector-0.70.0
+ heritage: Helm
+ release: default
+ name: default-splunk-otel-collector-k8s-collector
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector-k8s-collector
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: default
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-deployment.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-deployment.yaml
new file mode 100644
index 0000000000..a45d204f0a
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/k8s-collector-deployment.yaml
@@ -0,0 +1,78 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/k8s-collector-deployment.yaml
+# The k8s-collector consists of two services:
+# 1) k8s-watcher: talks to the Kubernetes API server to determine the current state of
+# the cluster; sets up watches to be notified of subsequent changes to pods, services
+# and other resources.
+# 2) k8s-relay: relays the information collected by k8s-watcher to the reducer.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-collector
+ labels:
+ app.kubernetes.io/name: default-splunk-otel-collector-k8s-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/managed-by: Helm
+spec:
+ progressDeadlineSeconds: 600
+ replicas: 1
+ revisionHistoryLimit: 3
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: default-splunk-otel-collector-k8s-collector
+ app.kubernetes.io/instance: default
+ strategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ # This is here to allow us to do "zero-downtime" updates without an image change.
+ rollingUpdateVersion: "1"
+ charts.flowmill.com/version: 0.70.0
+ labels:
+ app.kubernetes.io/name: default-splunk-otel-collector-k8s-collector
+ app.kubernetes.io/instance: default
+ spec:
+ containers:
+ - image: "quay.io/signalfx/splunk-network-explorer-k8s-watcher:latest-v0.9"
+ imagePullPolicy: Always
+ name: k8s-watcher
+ args:
+ - --log-console
+ - --log-level=warning
+ # k8s-relay, which is a service that the k8s-watcher talks to.
+ # Currently not configurable, has to be reachable on localhost:8172, so must
+ # share a pod with the k8s-watcher above.
+ - image: "quay.io/signalfx/splunk-network-explorer-k8s-relay:latest-v0.9"
+ imagePullPolicy: Always
+ name: k8s-relay
+ args:
+ - --config-file=/etc/network-explorer/config.yaml
+ - --warning
+ env:
+ - name: "EBPF_NET_CLUSTER_NAME"
+ value: "CHANGEME"
+ - name: "EBPF_NET_INTAKE_HOST"
+ value: default-splunk-otel-collector-reducer
+ - name: "EBPF_NET_INTAKE_PORT"
+ value: "7000"
+ - name: "EBPF_NET_CRASH_METRIC_HOST"
+ value: "default-splunk-otel-collector"
+ - name: "EBPF_NET_CRASH_METRIC_PORT"
+ value: "4317"
+ volumeMounts:
+ - mountPath: /etc/network-explorer
+ name: k8s-relay-config
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: k8s-relay-config
+ projected:
+ sources:
+ - configMap:
+ name: default-splunk-otel-collector-config
+ items:
+ - key: config.yaml
+ path: config.yaml
+ securityContext: {}
+ serviceAccountName: default
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-clusterrole.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-clusterrole.yaml
new file mode 100644
index 0000000000..04eea3ae3f
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-clusterrole.yaml
@@ -0,0 +1,20 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/kernel-collector-clusterrole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: default-splunk-otel-collector-kernel-collector
+ chart: splunk-otel-collector-0.70.0
+ heritage: Helm
+ release: default
+ name: default-splunk-otel-collector-kernel-collector
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - default-splunk-otel-collector-kernel-collector
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-clusterrolebinding.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-clusterrolebinding.yaml
new file mode 100644
index 0000000000..0adf446272
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/kernel-collector-clusterrolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app: default-splunk-otel-collector-kernel-collector
+ chart: splunk-otel-collector-0.70.0
+ heritage: Helm
+ release: default
+ name: default-splunk-otel-collector-kernel-collector
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector-kernel-collector
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: default
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-daemonset.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-daemonset.yaml
new file mode 100644
index 0000000000..be4cf0a8dc
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-daemonset.yaml
@@ -0,0 +1,104 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/kernel-collector-daemonset.yaml
+# kernel collector daemonset: deploys the kernel collector to each node in the cluster.
+# The kernel collector needs to be able to compile and install
+# eBPF programs in the node's kernel, so needs to run as root and
+# needs to mount /lib/modules and /usr/src from the node itself.
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-kernel-collector
+ annotations:
+ labels:
+ app.kubernetes.io/name: default-splunk-otel-collector-kernel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/managed-by: Helm
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: default-splunk-otel-collector-kernel-collector
+ app.kubernetes.io/instance: default
+ template:
+ metadata:
+ annotations:
+ release_number: "3"
+ charts.flowmill.com/version: 0.70.0
+ labels:
+ app.kubernetes.io/name: default-splunk-otel-collector-kernel-collector
+ app.kubernetes.io/instance: default
+ spec:
+ containers:
+ - name: kernel-collector
+ image: "quay.io/signalfx/splunk-network-explorer-kernel-collector:latest-v0.9"
+ imagePullPolicy: Always
+ args:
+ - --config-file=/etc/network-explorer/config.yaml
+ - --force-docker-metadata
+ - --disable-nomad-metadata
+ - --warning
+ # TODO: liveness probe
+ env:
+ - name: "EBPF_NET_CLUSTER_NAME"
+ value: "CHANGEME"
+ - name: "EBPF_NET_DISABLE_HTTP_METRICS"
+ value: "false"
+ - name: "EBPF_NET_KERNEL_HEADERS_AUTO_FETCH"
+ value: "true"
+ - name: "EBPF_NET_INTAKE_HOST"
+ value: default-splunk-otel-collector-reducer
+ - name: "EBPF_NET_INTAKE_PORT"
+ value: "7000"
+ - name: "EBPF_NET_HOST_DIR"
+ value: "/hostfs"
+ - name: "EBPF_NET_CRASH_METRIC_HOST"
+ value: "default-splunk-otel-collector"
+ - name: "EBPF_NET_CRASH_METRIC_PORT"
+ value: "4317"
+ resources:
+ {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /hostfs/
+ name: host-root
+ readOnly: true
+ - mountPath: /hostfs/var/cache
+ name: host-var-cache
+ readOnly: false
+ - mountPath: /etc/network-explorer
+ name: default-splunk-otel-collector-config
+ readOnly: true
+ - mountPath: /var/run/docker.sock
+ name: docker-sock
+ readOnly: false
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ serviceAccountName: default
+ volumes:
+ - name: default-splunk-otel-collector-config
+ projected:
+ sources:
+ - configMap:
+ name: default-splunk-otel-collector-config
+ items:
+ - key: config.yaml
+ path: config.yaml
+ - name: host-root
+ hostPath:
+ path: /
+ type: Directory
+ - name: host-var-cache
+ hostPath:
+ path: /var/cache
+ type: DirectoryOrCreate
+ - name: docker-sock
+ hostPath:
+ path: /var/run/docker.sock
+ type: Socket
+ tolerations:
+ - effect: NoExecute
+ operator: Exists
+ - effect: NoSchedule
+ operator: Exists
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-podsecuritypolicy.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-podsecuritypolicy.yaml
new file mode 100644
index 0000000000..5e097fe8b7
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/kernel-collector-podsecuritypolicy.yaml
@@ -0,0 +1,36 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/kernel-collector-podsecuritypolicy.yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: default-splunk-otel-collector-kernel-collector
+spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ allowedHostPaths:
+ - pathPrefix: /usr/src
+ readOnly: true
+ - pathPrefix: /lib/modules
+ readOnly: true
+ - pathPrefix: /etc
+ readOnly: true
+ - pathPrefix: /var/cache
+ readOnly: false
+ - pathPrefix: /sys/fs/cgroup
+ readOnly: true
+ fsGroup:
+ rule: RunAsAny
+ hostNetwork: true
+ hostPorts:
+ - max: 65535
+ min: 0
+ hostPID: true
+ readOnlyRootFilesystem: false
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - "*"
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/reducer-deployment.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/reducer-deployment.yaml
new file mode 100644
index 0000000000..72715bb28c
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/reducer-deployment.yaml
@@ -0,0 +1,64 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/reducer-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-reducer
+ labels:
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/name: default-splunk-otel-collector-reducer
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ strategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: default-splunk-otel-collector-reducer
+ app.kubernetes.io/instance: default
+ template:
+ metadata:
+ annotations:
+ rollingUpdateVersion: "1"
+ labels:
+ app.kubernetes.io/name: default-splunk-otel-collector-reducer
+ app.kubernetes.io/instance: default
+ spec:
+ containers:
+ - name: reducer
+ image: "quay.io/signalfx/splunk-network-explorer-reducer:latest-v0.9"
+ imagePullPolicy: Always
+ args:
+ - --port=7000
+ - --log-console
+ - --no-log-file
+ - --warning
+ - --enable-aws-enrichment
+ - --disable-prometheus-metrics
+ - --enable-otlp-grpc-metrics
+ - --otlp-grpc-metrics-host=default-splunk-otel-collector
+ - --otlp-grpc-metrics-port=4317
+ - --num-ingest-shards=1
+ - --num-matching-shards=1
+ - --num-aggregation-shards=1
+ env:
+ - name: "EBPF_NET_CRASH_METRIC_HOST"
+ value: "default-splunk-otel-collector"
+ - name: "EBPF_NET_CRASH_METRIC_PORT"
+ value: "4317"
+ ports:
+ - name: telemetry
+ containerPort: 7000
+ protocol: TCP
+ volumeMounts:
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 5
+ exec:
+ command: ['/srv/health_check.sh', 'readiness_probe', 'localhost', "7000"]
+ resources:
+ null
+ volumes:
diff --git a/examples/enable-network-explorer/rendered_manifests/network-explorer/reducer-service.yaml b/examples/enable-network-explorer/rendered_manifests/network-explorer/reducer-service.yaml
new file mode 100644
index 0000000000..cad7356434
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/network-explorer/reducer-service.yaml
@@ -0,0 +1,22 @@
+---
+# Source: splunk-otel-collector/templates/network-explorer/reducer-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: default-splunk-otel-collector-reducer
+ labels:
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/name: default-splunk-otel-collector-reducer
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ selector:
+ app.kubernetes.io/name: default-splunk-otel-collector-reducer
+ app.kubernetes.io/instance: default
+ ports:
+ - name: telemetry
+ port: 7000
+ targetPort: telemetry
+ protocol: TCP
diff --git a/examples/enable-network-explorer/rendered_manifests/secret-splunk.yaml b/examples/enable-network-explorer/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/enable-network-explorer/rendered_manifests/service.yaml b/examples/enable-network-explorer/rendered_manifests/service.yaml
new file mode 100644
index 0000000000..4d41becb65
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/service.yaml
@@ -0,0 +1,57 @@
+---
+# Source: splunk-otel-collector/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-collector
+spec:
+ type: ClusterIP
+ ports:
+ - name: http-forwarder
+ port: 6060
+ targetPort: http-forwarder
+ protocol: TCP
+ - name: jaeger-grpc
+ port: 14250
+ targetPort: jaeger-grpc
+ protocol: TCP
+ - name: jaeger-thrift
+ port: 14268
+ targetPort: jaeger-thrift
+ protocol: TCP
+ - name: otlp
+ port: 4317
+ targetPort: otlp
+ protocol: TCP
+ - name: otlp-http
+ port: 4318
+ targetPort: otlp-http
+ protocol: TCP
+ - name: otlp-http-old
+ port: 55681
+ targetPort: otlp-http-old
+ protocol: TCP
+ - name: signalfx
+ port: 9943
+ targetPort: signalfx
+ protocol: TCP
+ - name: zipkin
+ port: 9411
+ targetPort: zipkin
+ protocol: TCP
+ selector:
+ app: splunk-otel-collector
+ component: otel-collector
+ release: default
diff --git a/examples/enable-network-explorer/rendered_manifests/serviceAccount.yaml b/examples/enable-network-explorer/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/enable-network-explorer/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml b/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml b/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml b/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..0b27455720
--- /dev/null
+++ b/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,202 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ probabilistic_sampler:
+ hash_seed: 22
+ sampling_percentage: 15.3
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ processors:
+ - memory_limiter
+ - probabilistic_sampler
+ - k8sattributes
+ - batch
+ - resource
+ - resourcedetection
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml b/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..9be3a85ef3
--- /dev/null
+++ b/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml
@@ -0,0 +1,135 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: ab13703d5dd5ac2341d3f122a1b80e2775536c7f7abb85f2b05e7de113881c1e
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml b/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml b/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml b/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml b/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml b/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..bd827c63d5
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,282 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ pprof: null
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ - pprof
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml b/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml b/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..8f389bf3d4
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: 02cc42a814faa44ce25d6ede2fb23032bda87d1146e74979751c29788e270110
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml b/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml b/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml b/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/filter-container-metrics/rendered_manifests/clusterRole.yaml b/examples/filter-container-metrics/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/filter-container-metrics/rendered_manifests/clusterRoleBinding.yaml b/examples/filter-container-metrics/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/filter-container-metrics/rendered_manifests/configmap-agent.yaml b/examples/filter-container-metrics/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..b9ed214d2e
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,259 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/exclude_containers:
+ metrics:
+ exclude:
+ match_type: regexp
+ resource_attributes:
+ - key: k8s.container.name
+ value: ^(containerX|containerY)$
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ - filter/exclude_containers
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/filter-container-metrics/rendered_manifests/configmap-cluster-receiver.yaml b/examples/filter-container-metrics/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..b1aba08883
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,116 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ filter/exclude_containers:
+ metrics:
+ exclude:
+ match_type: regexp
+ resource_attributes:
+ - key: k8s.container.name
+ value: ^(containerX|containerY)$
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ - filter/exclude_containers
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/filter-container-metrics/rendered_manifests/daemonset.yaml b/examples/filter-container-metrics/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..83c089464e
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/daemonset.yaml
@@ -0,0 +1,177 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: 57ada3cba700545e990efc1d8c7edd2a249116a93973ebbef4c74faef73c09e6
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/filter-container-metrics/rendered_manifests/deployment-cluster-receiver.yaml b/examples/filter-container-metrics/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..6483121fc3
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 137d211d21eccc48e4268e12c29c811e0546677ab867f2cd2898978b836ad496
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/filter-container-metrics/rendered_manifests/secret-splunk.yaml b/examples/filter-container-metrics/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/filter-container-metrics/rendered_manifests/serviceAccount.yaml b/examples/filter-container-metrics/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/filter-container-metrics/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..8f9144180f
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,280 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml
new file mode 100644
index 0000000000..7766ddb26a
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml
@@ -0,0 +1,193 @@
+---
+# Source: splunk-otel-collector/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: default-splunk-otel-collector-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ release: default
+ annotations:
+ checksum/config: f24285909af0884c7557482977a7a54aa1294e3a121a5cf78d7572a19fc5bafd
+ kubectl.kubernetes.io/default-container: otel-collector
+ spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ ports:
+ - name: jaeger-grpc
+ containerPort: 14250
+ hostPort: 14250
+ protocol: TCP
+ - name: jaeger-thrift
+ containerPort: 14268
+ hostPort: 14268
+ protocol: TCP
+ - name: otlp
+ containerPort: 4317
+ hostPort: 4317
+ protocol: TCP
+ - name: otlp-http
+ containerPort: 4318
+ protocol: TCP
+ - name: otlp-http-old
+ containerPort: 55681
+ protocol: TCP
+ - name: sfx-forwarder
+ containerPort: 9080
+ hostPort: 9080
+ protocol: TCP
+ - name: signalfx
+ containerPort: 9943
+ hostPort: 9943
+ protocol: TCP
+ - name: zipkin
+ containerPort: 9411
+ hostPort: 9411
+ protocol: TCP
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ # Env variables for host metrics receiver
+ - name: HOST_PROC
+ value: /hostfs/proc
+ - name: HOST_SYS
+ value: /hostfs/sys
+ - name: HOST_ETC
+ value: /hostfs/etc
+ - name: HOST_VAR
+ value: /hostfs/var
+ - name: HOST_RUN
+ value: /hostfs/run
+ - name: HOST_DEV
+ value: /hostfs/dev
+ # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879
+ # is resolved fall back to previous gopsutil mountinfo path:
+ # https://github.com/shirou/gopsutil/issues/1271
+ - name: HOST_PROC_MOUNTINFO
+ value: /proc/self/mountinfo
+
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: otel-configmap
+ - mountPath: /hostfs/dev
+ name: host-dev
+ readOnly: true
+ - mountPath: /hostfs/etc
+ name: host-etc
+ readOnly: true
+ - mountPath: /hostfs/proc
+ name: host-proc
+ readOnly: true
+ - mountPath: /hostfs/run/udev/data
+ name: host-run-udev-data
+ readOnly: true
+ - mountPath: /hostfs/sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /hostfs/var/run/utmp
+ name: host-var-run-utmp
+ readOnly: true
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: host-dev
+ hostPath:
+ path: /dev
+ - name: host-etc
+ hostPath:
+ path: /etc
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-run-udev-data
+ hostPath:
+ path: /run/udev/data
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-var-run-utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: otel-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-agent
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml
new file mode 100644
index 0000000000..f474b9b96e
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml
@@ -0,0 +1,96 @@
+---
+# Source: splunk-otel-collector/templates/deployment-cluster-receiver.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: default-splunk-otel-collector-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+ app.kubernetes.io/component: otel-k8s-cluster-receiver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ template:
+ metadata:
+ labels:
+ app: splunk-otel-collector
+ component: otel-k8s-cluster-receiver
+ release: default
+ annotations:
+ checksum/config: 94371fe9c8062ad6c2eb9da843086ee092b3d1ddc2753b9f8198e6a422c5a20c
+ spec:
+ serviceAccountName: default-splunk-otel-collector
+ nodeSelector:
+ kubernetes.io/os: linux
+ containers:
+ - name: otel-collector
+ command:
+ - /otelcol
+ - --config=/conf/relay.yaml
+ image: quay.io/signalfx/splunk-otel-collector:0.70.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SPLUNK_MEMORY_TOTAL_MIB
+ value: "500"
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: K8S_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SPLUNK_OBSERVABILITY_ACCESS_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: splunk-otel-collector
+ key: splunk_observability_access_token
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 13133
+ resources:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ volumeMounts:
+ - mountPath: /conf
+ name: collector-configmap
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: collector-configmap
+ configMap:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ items:
+ - key: relay
+ path: relay.yaml
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml
new file mode 100644
index 0000000000..bf887d1f22
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml
@@ -0,0 +1,19 @@
+---
+# Source: splunk-otel-collector/templates/secret-splunk.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+type: Opaque
+data:
+ splunk_observability_access_token: Q0hBTkdFTUU=
diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml
new file mode 100644
index 0000000000..8529587196
--- /dev/null
+++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml
@@ -0,0 +1,16 @@
+---
+# Source: splunk-otel-collector/templates/serviceAccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml
new file mode 100644
index 0000000000..2a7dce9b91
--- /dev/null
+++ b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml
@@ -0,0 +1,83 @@
+---
+# Source: splunk-otel-collector/templates/clusterRole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ - namespaces
+ - namespaces/status
+ - nodes
+ - nodes/spec
+ - nodes/stats
+ - nodes/proxy
+ - pods
+ - pods/status
+ - persistentvolumeclaims
+ - persistentvolumes
+ - replicationcontrollers
+ - replicationcontrollers/status
+ - resourcequotas
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml
new file mode 100644
index 0000000000..180cb8d6d2
--- /dev/null
+++ b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/clusterRoleBinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: default-splunk-otel-collector
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: default-splunk-otel-collector
+subjects:
+- kind: ServiceAccount
+ name: default-splunk-otel-collector
+ namespace: default
diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml
new file mode 100644
index 0000000000..035f7a72ad
--- /dev/null
+++ b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml
@@ -0,0 +1,309 @@
+---
+# Source: splunk-otel-collector/templates/configmap-agent.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-agent
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ sapm:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ correlation: null
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ sync_host_metadata: true
+ splunk_hec/o11y:
+ disable_compression: true
+ endpoint: https://ingest.CHANGEME.signalfx.com/v1/log
+ log_data_enabled: true
+ profiling_data_enabled: false
+ token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ extensions:
+ health_check: null
+ k8s_observer:
+ auth_type: serviceAccount
+ node: ${K8S_NODE_NAME}
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ zpages: null
+ processors:
+ batch: null
+ filter/logs:
+ logs:
+ exclude:
+ match_type: strict
+ resource_attributes:
+ - key: splunk.com/exclude
+ value: "true"
+ groupbyattrs/logs:
+ keys:
+ - com.splunk.source
+ - com.splunk.sourcetype
+ - container.id
+ - fluent.tag
+ - istio_service_name
+ - k8s.container.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ k8sattributes:
+ extract:
+ annotations:
+ - from: pod
+ key: splunk.com/sourcetype
+ - from: namespace
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: pod
+ key: splunk.com/exclude
+ tag_name: splunk.com/exclude
+ - from: namespace
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ - from: pod
+ key: splunk.com/index
+ tag_name: com.splunk.index
+ labels:
+ - key: app
+ metadata:
+ - k8s.namespace.name
+ - k8s.node.name
+ - k8s.pod.name
+ - k8s.pod.uid
+ - container.id
+ - container.image.name
+ - container.image.tag
+ filter:
+ node_from_env_var: K8S_NODE_NAME
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.uid
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ - sources:
+ - from: resource_attribute
+ name: ip
+ - sources:
+ - from: connection
+ - sources:
+ - from: resource_attribute
+ name: host.name
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ metricstransform:
+ transforms:
+ - action: insert
+ include: container.memory.working_set
+ new_name: container.memory.usage
+ resource:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_agent_k8s:
+ attributes:
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/logs:
+ attributes:
+ - action: upsert
+ from_attribute: k8s.pod.annotations.splunk.com/sourcetype
+ key: com.splunk.sourcetype
+ - action: delete
+ key: k8s.pod.annotations.splunk.com/sourcetype
+ - action: delete
+ key: splunk.com/exclude
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ fluentforward:
+ endpoint: 0.0.0.0:8006
+ hostmetrics:
+ collection_interval: 10s
+ scrapers:
+ cpu: null
+ disk: null
+ filesystem: null
+ load: null
+ memory: null
+ network: null
+ paging: null
+ processes: null
+ jaeger:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ kubeletstats:
+ auth_type: serviceAccount
+ collection_interval: 10s
+ endpoint: ${K8S_NODE_IP}:10250
+ extra_metadata_labels:
+ - container.id
+ metric_groups:
+ - container
+ - pod
+ - node
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus/agent:
+ config:
+ scrape_configs:
+ - job_name: otel-agent
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ receiver_creator:
+ receivers:
+ smartagent/coredns:
+ config:
+ extraDimensions:
+ metric_source: k8s-coredns
+ port: 9153
+ type: coredns
+ rule: type == "pod" && labels["k8s-app"] == "kube-dns"
+ smartagent/kube-controller-manager:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-controller-manager
+ port: 10257
+ skipVerify: true
+ type: kube-controller-manager
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "pod" && labels["k8s-app"] == "kube-controller-manager"
+ smartagent/kubernetes-apiserver:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-apiserver
+ skipVerify: true
+ type: kubernetes-apiserver
+ useHTTPS: true
+ useServiceAccount: true
+ rule: type == "port" && port == 443 && pod.labels["k8s-app"] == "kube-apiserver"
+ smartagent/kubernetes-proxy:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-proxy
+ port: 10249
+ type: kubernetes-proxy
+ rule: type == "pod" && labels["k8s-app"] == "kube-proxy"
+ smartagent/kubernetes-scheduler:
+ config:
+ extraDimensions:
+ metric_source: kubernetes-scheduler
+ port: 10251
+ type: kubernetes-scheduler
+ rule: type == "pod" && labels["k8s-app"] == "kube-scheduler"
+ watch_observers:
+ - k8s_observer
+ signalfx:
+ endpoint: 0.0.0.0:9943
+ smartagent/signalfx-forwarder:
+ listenAddress: 0.0.0.0:9080
+ type: signalfx-forwarder
+ zipkin:
+ endpoint: 0.0.0.0:9411
+ service:
+ extensions:
+ - health_check
+ - k8s_observer
+ - memory_ballast
+ - zpages
+ pipelines:
+ logs:
+ exporters:
+ - splunk_hec/o11y
+ processors:
+ - memory_limiter
+ - groupbyattrs/logs
+ - k8sattributes
+ - filter/logs
+ - batch
+ - resource/logs
+ - resourcedetection
+ - resource
+ receivers:
+ - fluentforward
+ - otlp
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resourcedetection
+ - resource
+ - metricstransform
+ receivers:
+ - hostmetrics
+ - kubeletstats
+ - otlp
+ - receiver_creator
+ - signalfx
+ metrics/agent:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_agent_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/agent
+ traces:
+ exporters:
+ - sapm
+ - signalfx
+ processors:
+ - memory_limiter
+ - k8sattributes
+ - batch
+ - resourcedetection
+ - resource
+ receivers:
+ - otlp
+ - jaeger
+ - smartagent/signalfx-forwarder
+ - zipkin
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml
new file mode 100644
index 0000000000..ae040bb721
--- /dev/null
+++ b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml
@@ -0,0 +1,108 @@
+---
+# Source: splunk-otel-collector/templates/configmap-cluster-receiver.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-otel-k8s-cluster-receiver
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ relay: |
+ exporters:
+ signalfx:
+ access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
+ api_url: https://api.CHANGEME.signalfx.com
+ ingest_url: https://ingest.CHANGEME.signalfx.com
+ timeout: 10s
+ extensions:
+ health_check: null
+ memory_ballast:
+ size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
+ processors:
+ batch: null
+ memory_limiter:
+ check_interval: 2s
+ limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+ resource:
+ attributes:
+ - action: insert
+ key: metric_source
+ value: kubernetes
+ - action: upsert
+ key: k8s.cluster.name
+ value: CHANGEME
+ resource/add_collector_k8s:
+ attributes:
+ - action: insert
+ key: k8s.node.name
+ value: ${K8S_NODE_NAME}
+ - action: insert
+ key: k8s.pod.name
+ value: ${K8S_POD_NAME}
+ - action: insert
+ key: k8s.pod.uid
+ value: ${K8S_POD_UID}
+ - action: insert
+ key: k8s.namespace.name
+ value: ${K8S_NAMESPACE}
+ resource/k8s_cluster:
+ attributes:
+ - action: insert
+ key: receiver
+ value: k8scluster
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ override: true
+ timeout: 10s
+ receivers:
+ k8s_cluster:
+ auth_type: serviceAccount
+ metadata_exporters:
+ - signalfx
+ prometheus/k8s_cluster_receiver:
+ config:
+ scrape_configs:
+ - job_name: otel-k8s-cluster-receiver
+ scrape_interval: 10s
+ static_configs:
+ - targets:
+ - ${K8S_POD_IP}:8889
+ service:
+ extensions:
+ - health_check
+ - memory_ballast
+ pipelines:
+ metrics:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource
+ - resource/k8s_cluster
+ receivers:
+ - k8s_cluster
+ metrics/collector:
+ exporters:
+ - signalfx
+ processors:
+ - memory_limiter
+ - batch
+ - resource/add_collector_k8s
+ - resourcedetection
+ - resource
+ receivers:
+ - prometheus/k8s_cluster_receiver
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8889
diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-fluentd-json.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-fluentd-json.yaml
new file mode 100644
index 0000000000..3b6ede6591
--- /dev/null
+++ b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-fluentd-json.yaml
@@ -0,0 +1,24 @@
+---
+# Source: splunk-otel-collector/templates/configmap-fluentd-json.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-fluentd-json
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ source.containers.parse.conf: |-
+ @type json
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+
+ output.filter.conf: ""
+
+ output.transform.conf: ""
diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-fluentd.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-fluentd.yaml
new file mode 100644
index 0000000000..f6dcf8f433
--- /dev/null
+++ b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-fluentd.yaml
@@ -0,0 +1,352 @@
+---
+# Source: splunk-otel-collector/templates/configmap-fluentd.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: default-splunk-otel-collector-fluentd
+ labels:
+ app.kubernetes.io/name: splunk-otel-collector
+ helm.sh/chart: splunk-otel-collector-0.70.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "0.70.0"
+ app: splunk-otel-collector
+ chart: splunk-otel-collector-0.70.0
+ release: default
+ heritage: Helm
+data:
+ fluent.conf: |-
+ @include system.conf
+ @include source.containers.conf
+ @include source.files.conf
+ @include source.journald.conf
+ @include output.conf
+ @include prometheus.conf
+
+ system.conf: |-
+ # system wide configurations
+
+ log_level info
+ root_dir /tmp/fluentd
+
+
+ prometheus.conf: |-
+ # input plugin that exports metrics
+
+
+ # input plugin that collects metrics from MonitorAgent
+
+
+ # input plugin that collects metrics for output plugin
+
+
+ source.containers.conf: |-
+ # This configuration file for Fluentd / td-agent is used
+ # to watch changes to Docker log files. The kubelet creates symlinks that
+ # capture the pod name, namespace, container name & Docker container ID
+ # to the docker logs for pods in the /var/log/containers directory on the host.
+ # If running this fluentd configuration in a Docker container, the /var/log
+ # directory should be mounted in the container.
+ # reading kubelet logs from journal
+ #
+ # Reference:
+ # https://github.com/kubernetes/community/blob/20d2f6f5498a5668bae2aea9dcaf4875b9c06ccb/contributors/design-proposals/node/kubelet-cri-logging.md
+ #
+ # Json Log Example:
+ # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+ # CRI Log Example (not supported):
+ # 2016-02-17T00:04:05.931087621Z stdout P { 'long': { 'json', 'object output' },
+ # 2016-02-17T00:04:05.931087621Z stdout F 'splitted': 'partial-lines' }
+ # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
+
+
+ source.files.conf: |-
+ # This fluentd conf file contains sources for log files other than container logs.
+
+
+ source.journald.conf: |-
+ # This fluentd conf file contains configurations for reading logs from systemd journal.
+
+
+
+ output.conf: |-
+ #Events are emitted to the CONCAT label from the container, file and journald sources for multiline processing.
+
+
+ source.containers.parse.conf: |-
+ @type regexp
+ expression /^(?