diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4c06868c32..32caee11d2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ repos: - repo: local hooks: - id: render - name: Pre-render Kubernetes resources + name: Create the rendered Kubernetes manifest resources for the project examples entry: make render language: system pass_filenames: false @@ -12,9 +12,9 @@ repos: rev: v3.2.0 hooks: - id: trailing-whitespace - exclude: "^rendered|^test" + exclude: "^examples|^test" - id: end-of-file-fixer - exclude: "^rendered|^test" + exclude: "^examples|^test" - id: check-yaml # Can't check source yaml since it has go templates in it. exclude: "^helm-charts|^test" diff --git a/CHANGELOG.md b/CHANGELOG.md index 0264cb9b08..3bb3e847c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## Unreleased +### Changed + +- Refactored the examples and rendered directories into one for better usability ([#658](https://github.com/signalfx/splunk-otel-collector-chart/pull/658)) + ## [0.70.0] - 2022-01-31 ### Added diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1f46960f95..18d8db2301 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -80,7 +80,7 @@ any 'help wanted' issues is a great place to start. ## Building -When changing the helm chart the files under `rendered` need to be rebuilt with `make render`. It's strongly recommended to use [pre-commit](https://pre-commit.com/) which will do this automatically for each commit (as well as run some linting). +When changing the helm chart the files under `examples/*/rendered_manifests` need to be rebuilt with `make render`. It's strongly recommended to use [pre-commit](https://pre-commit.com/) which will do this automatically for each commit (as well as run some linting). 
## Running locally diff --git a/Makefile b/Makefile index f648b1c257..f8e0f6a8fc 100644 --- a/Makefile +++ b/Makefile @@ -1,87 +1,3 @@ .PHONY: render render: - rm -rf rendered/manifests - # Set for one of each telemetry type. - for i in metrics traces logs; do \ - dir=rendered/manifests/"$$i-only"; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --set splunkObservability.metricsEnabled=false,splunkObservability.tracesEnabled=false,splunkObservability.logsEnabled=false,splunkObservability.$${i}Enabled=true \ - --output-dir "$$dir" \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector; \ - done - - # Default configuration deployment. - dir=rendered/manifests/agent-only; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --output-dir "$$dir" \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector - - # Gateway mode deployment only. - dir=rendered/manifests/gateway-only; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --output-dir "$$dir" \ - --set agent.enabled=false,gateway.enabled=true,clusterReceiver.enabled=false \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector - - # Native OTel logs collection instead of fluentd. 
- dir=rendered/manifests/otel-logs; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --output-dir "$$dir" \ - --set logsEngine=otel,splunkObservability.logsEnabled=true \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector - - # eks/fargate deployment (with recommended gateway) - dir=rendered/manifests/eks-fargate; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --output-dir "$$dir" \ - --set distribution=eks/fargate,gateway.enabled=true,cloudProvider=aws \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector - - # network-explorer deployment (with recommended gateway) - dir=rendered/manifests/network-explorer; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --output-dir "$$dir" \ - --set networkExplorer.enabled=true,agent.enabled=false \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector - - # cluster-receiver objects collection enabled - dir=rendered/manifests/cluster-receiver-objects; \ - mkdir -p "$$dir"; \ - helm template \ - --namespace default \ - --values rendered/values.yaml \ - --output-dir "$$dir" \ - --set logsEngine=otel,splunkObservability.logsEnabled=true,clusterReceiver.k8sObjects[0].name=pods,clusterReceiver.k8sObjects[0].mode=pull,clusterReceiver.k8sObjects[0].interval=20,clusterReceiver.k8sObjects[1].name=events,clusterReceiver.k8sObjects[1].mode=watch \ - default helm-charts/splunk-otel-collector; \ - mv "$$dir"/splunk-otel-collector/templates/* "$$dir"; \ - rm -rf "$$dir"/splunk-otel-collector + bash ./examples/render-examples.sh diff --git a/README.md b/README.md index 1bfda9d72d..a4734c42cc 100644 --- a/README.md 
+++ b/README.md @@ -187,7 +187,7 @@ Instead of setting helm values as arguments a YAML file can be provided: helm install my-splunk-otel-collector --values my_values.yaml splunk-otel-collector-chart/splunk-otel-collector ``` -The [rendered directory](rendered) contains pre-rendered Kubernetes resource manifests. +The [examples directory](examples) contains examples of typical use cases with pre-rendered Kubernetes resource manifests for each example. ### How to upgrade diff --git a/RELEASE.md b/RELEASE.md index 867f1b9ae5..a983c57182 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -15,7 +15,7 @@ of `version` field. To make a new release of the helm chart: 1. Bump the `version` in [Chart.yaml](helm-charts/splunk-otel-collector/Chart.yaml) -2. Run `make render` to re-render the yaml rendered files. +2. Run `make render` to render all the examples with the latest changes. 3. Create PR and request review from the team. 4. When the PR gets merged, the release will automatically be made and the helm repo updated. 5. Release notes are not populated automatically. So make sure to update them manually using the notes from diff --git a/examples/README.md b/examples/README.md index c3c59888e6..5815c85fa9 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,283 +1,45 @@ -# Examples of chart configuration +# Examples of Helm Chart value configurations and resulting rendered Kubernetes manifests -The Splunk OpenTelemetry Collector Chart can be configured in many ways to -support different use-cases. Here is a collection of example values.yaml files -that can be used with the chart installation or upgrade commands to change the -default behavior. +## Structure + +Each example has a directory where each of the following is included. +- README.md: A short description about the example. +- A Helm values configuration file to demonstrate the example. +- A rendered_manifests directory that contains the rendered Kubernetes manifests for the example. 
+ - Search for "CHANGEME" to find the values that must be changed in order to use the rendered manifests directly. + +## Using Install Examples Usage example: ``` -helm install my-splunk-otel-collector --values my-values.yaml splunk-otel-collector-chart/splunk-otel-collector +helm install my-splunk-otel-collector --values path-to-values-file.yaml splunk-otel-collector-chart/splunk-otel-collector ``` -All of the provided examples must also include the required parameters: +## Common Configurations + +The Splunk OpenTelemetry Collector Chart can be configured to export data to +the following targets: +- [Splunk Enterprise](https://www.splunk.com/en_us/software/splunk-enterprise.html) +- [Splunk Cloud Platform](https://www.splunk.com/en_us/software/splunk-cloud-platform.html) +- [Splunk Observability Cloud](https://www.observability.splunk.com/) + +All the provided examples must include one of these two configuration sets to +know which target to export data to. +Use these configurations for exporting data to Splunk Enterprise or Splunk Cloud Platform. ```yaml # Splunk Platform required parameters +clusterName: CHANGEME splunkPlatform: - token: xxxxxx + token: CHANGEME endpoint: http://localhost:8088/services/collector ``` -or - +Use these configurations for exporting data to Splunk Observability Cloud. ```yaml # Splunk Observability required parameters -clusterName: my-cluster +clusterName: CHANGEME splunkObservability: - realm: us0 - accessToken: my-access-token -``` - -## Enable traces sampling - -This example shows how to change default OTel Collector configuration to add -[Probabilistic Sampling Processor](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/probabilisticsamplerprocessor). -This approach can be used for any other OTel Collector re-configuration as well. 
-Final OTel config will be created by merging the custom config provided in -`agent.config` into [default configuration of agent-mode -collector](https://github.com/signalfx/splunk-otel-collector-chart/blob/main/helm-charts/splunk-otel-collector/templates/config/_otel-agent.tpl). - -```yaml -agent: - config: - processors: - probabilistic_sampler: - hash_seed: 22 - sampling_percentage: 15.3 - service: - pipelines: - traces: - processors: - - memory_limiter - - probabilistic_sampler - - k8sattributes - - batch - - resource - - resourcedetection -``` - -In the example above, first we define a new processor, then add it to the -default traces pipeline. The pipeline has to be fully redefined, because -lists cannot merge - they have to be overridden. - -## Add Receiver Creator - -This example shows how to add a receiver creator to the OTel Collector configuration -[Receiver Creator](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/receivercreator). -In this example we will configure it to observe kubernetes pods and in case there is a pod -using port 5432 the collector will dynamically create a smartagent/postgresql receiver to monitor it. - -```yaml -agent: - config: - receivers: - receiver_creator: - watch_observers: [k8s_observer] - receivers: - smartagent/postgresql: - rule: type == "port" && port == 5432 - config: - type: postgresql - connectionString: 'sslmode=disable user={{.username}} password={{.password}}' - params: - username: postgres - password: password - port: 5432 -``` - -By default, the receiver_creator receiver is part of the metrics pipeline, for example: -```yaml -pipelines: - metrics: - receivers: - - hostmetrics - - kubeletstats - - otlp - - receiver_creator - - signalfx -``` - -## Enable OTel Collector in the gateway mode - -This configuration installs collector as a gateway deployment along with -regular components. All the telemetry will be routed through this collector. 
-By default, the gateway-mode collector deployed with 3 replicas with 4 CPU -cores and 8Gb of memory each, but this can be easily changed as in this example. -`resources` can be adjusted for other components as well: `agent`, -`clusterReceiver`, `fluentd`. - -```yaml -gateway: - enabled: true - replicaCount: 1 - resources: - limits: - cpu: 2 - memory: 4Gb -``` - -## Deploy gateway-mode OTel Collector only - -This configuration will install collector as a gateway deployment only. -No metrics or logs will be collected from the gateway instance(s), the gateway -can be used to forward telemetry data through it for aggregation, enrichment -purposes. - -```yaml -gateway: - enabled: true -agent: - enabled: false -clusterReceiver: - enabled: false -``` - -## Route telemetry data through a gateway deployed separately - -The following configuration can be used to forward telemetry through an OTel -collector gateway deployed separately. - -```yaml -agent: - config: - exporters: - otlp: - endpoint: :4317 - tls: - insecure: true - signalfx: - ingest_url: http://:9943 - api_url: http://:6060 - service: - pipelines: - traces: - exporters: [otlp, signalfx] - metrics: - exporters: [otlp] - logs: - exporters: [otlp] - -clusterReceiver: - config: - exporters: - signalfx: - ingest_url: http://:9943 - api_url: http://:6060 -``` - -OTLP format is used between agent and gateway wherever possible for performance -reasons. OTLP is almost the same as internal data representation in OTel -Collector, so using it between agent and gateway reduce CPU cycles spent on -data format transformations. - -## Route telemetry data through a proxy server - -This configuration shows how to add extra environment variables to OTel -Collector containers to send the traffic through a proxy server from -both components that are enabled by default. 
- -```yaml -agent: - extraEnvs: - - name: HTTPS_PROXY - value: "192.168.0.10" -clusterReceiver: - extraEnvs: - - name: HTTPS_PROXY - value: "192.168.0.10" -``` - -## Enable multiline logs parsing of Java stack traces - -This configuration shows how to enable parsing of Java stack trace from all -pods in the cluster starting with "java-app" name. - -```yaml -fluentd: - config: - logs: - java-app: - from: - pod: "java-app" - multiline: - firstline: /\d{4}-\d{1,2}-\d{1,2}/ -``` - -## Filter out specific containers - -This example shows how you can filter out specific containers from metrics -pipelines. This could be adapted for other metadata or pipelines. -Filters should be added to both the agent and the cluster receiver. - -```yaml -agent: - config: - processors: - filter/exclude_containers: - metrics: - exclude: - match_type: regexp - resource_attributes: - - Key: k8s.container.name - Value: '^(containerX|containerY)$' - service: - pipelines: - metrics: - processors: - - memory_limiter - - batch - - resourcedetection - - resource - - filter/exclude_containers -clusterReceiver: - config: - processors: - filter/exclude_containers: - metrics: - exclude: - match_type: regexp - resource_attributes: - - Key: k8s.container.name - Value: '^(containerX|containerY)$' - service: - pipelines: - metrics: - processors: - - memory_limiter - - batch - - resource - - resource/k8s_cluster - - filter/exclude_containers -``` - -# Logs collection configuration for CRI-O container runtime - -Default logs collection is configured for Docker container runtime. -The following configuration should be set for CRI-O or containerd runtimes, -e.g. OpenShift. - -```yaml -fluentd: - config: - containers: - logFormatType: cri - criTimeFormat: "%Y-%m-%dT%H:%M:%S.%N%:z" -``` - -`criTimeFormat` can be used to configure logs collection for different log -formats, e.g. `criTimeFormat: "%Y-%m-%dT%H:%M:%S.%NZ"` for IBM IKS. 
- -# Route log records to specific Splunk Enterprise indexes - -Configure log collection to set the index to target with logs to the name -of the kubernetes namespace they originate from. - -```yaml -logsCollection: - containers: - extraOperators: - - type: copy - from: resource["k8s.namespace.name"] - to: resource["com.splunk.index"] + realm: CHANGEME + accessToken: CHANGEME ``` diff --git a/examples/add-receiver-creator-values.yaml b/examples/add-receiver-creator-values.yaml deleted file mode 100644 index ee4cb4fb55..0000000000 --- a/examples/add-receiver-creator-values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -agent: - config: - receivers: - receiver_creator: - watch_observers: [k8s_observer] - receivers: - smartagent/postgresql: - rule: type == "port" && port == 5432 - config: - type: postgresql - connectionString: 'sslmode=disable user={{.username}} password={{.password}}' - params: - username: postgres - password: password - port: 5432 diff --git a/examples/add-receiver-creator/README.md b/examples/add-receiver-creator/README.md new file mode 100644 index 0000000000..2e089608f8 --- /dev/null +++ b/examples/add-receiver-creator/README.md @@ -0,0 +1,21 @@ +# Example of chart configuration + +## Add Receiver Creator + +This example shows how to add a receiver creator to the OTel Collector configuration +[Receiver Creator](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/receivercreator). +In this example we will configure it to observe kubernetes pods and in case there is a pod +using port 5432 the collector will dynamically create a smartagent/postgresql receiver to monitor it +and using port 5433 the collector will dynamically create an OpenTelemetry postgresql receiver. 
+ +By default, the receiver_creator receiver is part of the metrics pipeline, for example: +```yaml +pipelines: + metrics: + receivers: + - hostmetrics + - kubeletstats + - otlp + - receiver_creator + - signalfx +``` diff --git a/examples/add-receiver-creator/add-receiver-creator-values.yaml b/examples/add-receiver-creator/add-receiver-creator-values.yaml new file mode 100644 index 0000000000..ffed817472 --- /dev/null +++ b/examples/add-receiver-creator/add-receiver-creator-values.yaml @@ -0,0 +1,30 @@ +clusterName: CHANGEME +splunkObservability: + realm: CHANGEME + accessToken: CHANGEME + +agent: + config: + receivers: + receiver_creator: + watch_observers: [k8s_observer] + receivers: + # Legacy Smart Agent PostgreSQL receiver + # See: https://docs.splunk.com/Observability/gdi/postgresql/postgresql.html + smartagent/postgresql: + rule: type == "port" && port == 5432 + config: + type: postgresql + connectionString: 'sslmode=disable user={{.username}} password={{.password}}' + params: + username: postgres + password: password + port: 5432 + # Current OpenTelemetry PostgreSQL receiver + # See: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/postgresqlreceiver + postgresql: + rule: type == "port" && port == 5433 + config: + username: postgres + password: password + endpoint: localhost:5433 diff --git a/rendered/manifests/agent-only/clusterRole.yaml b/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml similarity index 100% rename from rendered/manifests/agent-only/clusterRole.yaml rename to examples/add-receiver-creator/rendered_manifests/clusterRole.yaml diff --git a/rendered/manifests/agent-only/clusterRoleBinding.yaml b/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml similarity index 100% rename from rendered/manifests/agent-only/clusterRoleBinding.yaml rename to examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml diff --git 
a/rendered/manifests/otel-logs/configmap-agent.yaml b/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml similarity index 63% rename from rendered/manifests/otel-logs/configmap-agent.yaml rename to examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml index d29a52e1fc..3c4086b631 100644 --- a/rendered/manifests/otel-logs/configmap-agent.yaml +++ b/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml @@ -26,15 +26,7 @@ data: correlation: null ingest_url: https://ingest.CHANGEME.signalfx.com sync_host_metadata: true - splunk_hec/o11y: - disable_compression: true - endpoint: https://ingest.CHANGEME.signalfx.com/v1/log - log_data_enabled: true - profiling_data_enabled: false - token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} extensions: - file_storage: - directory: /var/addon/splunk/otel_pos health_check: null k8s_observer: auth_type: serviceAccount @@ -51,6 +43,17 @@ data: resource_attributes: - key: splunk.com/exclude value: "true" + groupbyattrs/logs: + keys: + - com.splunk.source + - com.splunk.sourcetype + - container.id + - fluent.tag + - istio_service_name + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - k8s.pod.uid k8sattributes: extract: annotations: @@ -133,108 +136,6 @@ data: override: true timeout: 10s receivers: - filelog: - encoding: utf-8 - exclude: - - /var/log/pods/default_default-splunk-otel-collector*_*/otel-collector/*.log - fingerprint_size: 1kb - force_flush_period: "0" - include: - - /var/log/pods/*/*/*.log - include_file_name: false - include_file_path: true - max_concurrent_files: 1024 - max_log_size: 1MiB - operators: - - id: get-format - routes: - - expr: body matches "^\\{" - output: parser-docker - - expr: body matches "^[^ Z]+ " - output: parser-crio - - expr: body matches "^[^ Z]+Z" - output: parser-containerd - type: router - - id: parser-crio - regex: ^(?P