From b4b526a16ef4a44012f3400a4dfd9a24eced674e Mon Sep 17 00:00:00 2001 From: jvoravong Date: Mon, 11 Sep 2023 14:19:53 -0600 Subject: [PATCH 1/5] Add ability to update and track operator auto-instrumentation images in values.yaml file --- .../workflows/update_chart_dependencies.yaml | 28 ++- .../update_instrumentation_dependencies.yaml | 60 ++--- ci_scripts/base_util.sh | 92 ++++++++ ci_scripts/update-images-operator-otel.sh | 98 +++++++++ ci_scripts/update-images-operator-splunk.sh | 63 ++++++ .../operator/instrumentation.yaml | 58 +++-- .../templates/operator/_helpers.tpl | 207 ++++++++++++++---- .../templates/operator/instrumentation.yaml | 35 ++- helm-charts/splunk-otel-collector/values.yaml | 49 +++-- 9 files changed, 578 insertions(+), 112 deletions(-) create mode 100755 ci_scripts/base_util.sh create mode 100755 ci_scripts/update-images-operator-otel.sh create mode 100755 ci_scripts/update-images-operator-splunk.sh diff --git a/.github/workflows/update_chart_dependencies.yaml b/.github/workflows/update_chart_dependencies.yaml index 8e273dcfc2..6b6646795b 100644 --- a/.github/workflows/update_chart_dependencies.yaml +++ b/.github/workflows/update_chart_dependencies.yaml @@ -1,10 +1,23 @@ name: Check for new chart dependency updates +# Description: +# This workflow automates the process of checking for and updating Helm chart dependencies. +# Specifically, it: +# 1. Checks for new versions of (subchart) dependencies listed in chart.yaml. +# 2. Updates chart.yaml with new versions where applicable. +# 3. If the 'opentelemetry-operator' subchart is updated in chart.yaml, it also updates related +# image tags in values.yaml. + on: schedule: # Run every Monday at noon. - cron: "0 12 * * 1" workflow_dispatch: + inputs: + DEBUG_MODE: + description: 'Enable debug mode' + required: false + default: 'false' env: CHART_YAML: helm-charts/splunk-otel-collector/Chart.yaml @@ -21,7 +34,15 @@ jobs: - name: Update Chart id: update_chart run: | - # Run make repo-update to ensure repositories are up-to-date + echo "Update dependencies for Helm chart" + + # Set debug argument if DEBUG_MODE is true + DEBUG_ARG="" + if [ "${{ github.event.inputs.DEBUG_MODE }}" == "true" ]; then + DEBUG_ARG="--debug" + fi + + # Ensure repositories are up-to-date make repo-update # Fetch the latest version using helm search repo @@ -44,6 +65,11 @@ jobs: DEP_LINE=$(yq eval ".dependencies | keys | map(tonumber) | map(select(. != null)) | map(select(. < 10000)) | map(. + 1)" $CHART_YAML | jq ".[] | select(.[\"name\"] == \"${{ matrix.repo }}\")") sed -i "${DEP_LINE}s/$DEP_PATH/$LATEST_VER/" $CHART_YAML + if [ ${{ matrix.repo }} == "opentelemetry-operator" ]; then + echo "Update Splunk Operator Instrumentation Images if Needed" + ./ci_scripts/update-images-operator-otel.sh ${{ matrix.language }} $DEBUG_ARG || exit 1 + fi + echo Updating rendered examples make render diff --git a/.github/workflows/update_instrumentation_dependencies.yaml b/.github/workflows/update_instrumentation_dependencies.yaml index 2adfe589b8..0141ab061c 100644 --- a/.github/workflows/update_instrumentation_dependencies.yaml +++ b/.github/workflows/update_instrumentation_dependencies.yaml @@ -1,10 +1,19 @@ name: Check for new instrumentation versions +# Description: +# This workflow is responsible for checking for new versions of Splunk instrumentation libraries +# used in operator based auto-instrumentation and updating the values.yaml if necessary. + on: schedule: # Run every 12th hour at minute 45 past. 
- cron: "45 */12 * * *" workflow_dispatch: + inputs: + DEBUG_MODE: + description: 'Enable debug mode' + required: false + default: 'false' env: VALUES_YAML: helm-charts/splunk-otel-collector/values.yaml @@ -15,43 +24,36 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - # Currently this workflow will update the listed operator instrumentation dependencies in values.yaml - language: ['java'] # Add other languages here + # Add languages that require version updates here + language: ['java'] steps: - uses: actions/checkout@v4 - - name: swizzle version + + - name: Update Version id: swizzle_version run: | - REPO=ghcr.io/signalfx/splunk-otel-${{ matrix.language }}/splunk-otel-${{ matrix.language }} - LOCAL_VER=$(grep $REPO $VALUES_YAML | awk -F: '{print $2}' | tr -d ' "') - LATEST_VER=$(curl -qs -H "Accept: application/vnd.github+json" $(echo $LATEST_API | sed "s/{lang}/${{ matrix.language }}/g") | jq -r .tag_name) - echo "LATEST_VER=$LATEST_VER" >> $GITHUB_OUTPUT - echo "Current version of ${{ matrix.language }} is $LOCAL_VER, latest is $LATEST_VER" + echo "Update Splunk Operator Instrumentation for ${{ matrix.language }}" - if [ "$LATEST_VER" == "$LOCAL_VER" ]; then - echo We are already up to date. Nothing else to do. - else - echo 'Verifying that the image is pullable...' - echo '(If this fails, the image version is out of sync with ghcr version)' - docker pull $REPO:$LATEST_VER - echo 'Looks like we are good to update...' - echo Updating to new version in values.yaml - echo "NEED_UPDATE=1" >> $GITHUB_OUTPUT - VLINE=$(grep -n "${REPO}" $VALUES_YAML | cut -f1 -d:) - echo "Line number for ${REPO} in ${VALUES_YAML} is: ${VLINE}" - OLD_VER=$(sed -n "${VLINE}p" $VALUES_YAML | grep -oP 'v\K[0-9.]+') - echo "Old version number is: ${OLD_VER}" - NEW_VER=${LATEST_VER#v} # removes 'v' from the start of the string - echo "New version number is: ${NEW_VER}" - echo "sed: ${VLINE}s/${OLD_VER}/${NEW_VER}/" - sed -i "${VLINE}s/${OLD_VER}/${NEW_VER}/" $VALUES_YAML + # Set debug argument if DEBUG_MODE is true + DEBUG_ARG="" + if [ "${{ github.event.inputs.DEBUG_MODE }}" == "true" ]; then + DEBUG_ARG="--debug" + fi - echo Render chart template - make render + # Run the update script and handle errors + ./ci_scripts/update-images-operator-splunk.sh ${{ matrix.language }} $DEBUG_ARG || exit 1 - echo "Current git diff:" - git --no-pager diff + # Check if an update is needed + if [ "$NEED_UPDATE" -eq 0 ]; then + echo "No updates detected. Exiting." + exit 0 fi + + echo "Rendering chart template..." + make render + + echo "Displaying current git diff..." + git --no-pager diff - name: PR the new version if: ${{ steps.swizzle_version.outputs.NEED_UPDATE == 1 }} uses: peter-evans/create-pull-request@v5 diff --git a/ci_scripts/base_util.sh b/ci_scripts/base_util.sh new file mode 100755 index 0000000000..7ee8667294 --- /dev/null +++ b/ci_scripts/base_util.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# Base Utility Functions Library For CI/CD +# This script provides a set of utility functions for debugging, variable setting, +# and common CI/CD operations. It's designed to be sourced by other scripts to +# provide a standardized way of setting variables, debugging, and handling common +# tasks like fetching Helm chart resources. + +# Note: This utility sets "set -e", which will cause any script that sources it +# to exit if any command fails. Make sure your script is compatible with this behavior. 
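+#
+# For example (illustrative only, not part of this utility), a sourcing script that
+# needs to tolerate a failing command can guard it explicitly so "set -e" does not
+# abort the whole run:
+#   OPTIONAL_VALUE=$(some_command_that_may_fail) || OPTIONAL_VALUE=""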
+set -e + +# Paths for the Helm chart resources +CHART_FILE_PATH="$SCRIPT_DIR/../helm-charts/splunk-otel-collector/Chart.yaml" +VALUES_FILE_PATH="$SCRIPT_DIR/../helm-charts/splunk-otel-collector/values.yaml" + +# Set default OWNER to "signalfx" if not already set +: "${OWNER:=signalfx}" # Sets OWNER to "signalfx" if it is not already set + +# Debug mode is off by default but can be enabled with --debug +: "${DEBUG_MODE:=0}" # Sets DEBUG_MODE to 0 if it is not already set + +# Iterate over all arguments of the calling script +for arg in "$@"; do + if [[ "$arg" == "--debug" ]]; then + DEBUG_MODE=1 # Enable debug mode + # Remove --debug from arguments + for index in "${!@}"; do + if [[ "${!index}" == "--debug" ]]; then + unset "$index" + break + fi + done + # Re-index the arguments array + set -- "${@}" + fi +done + +# ---- Debug Methods ---- +# These methods provide functions for setting and debugging variables. +# To use this utility, source it in your script as shown in the example below: +# +# Example: +# ```bash +# #!/bin/bash +# # Source the utility script to get access to its functions and variables +# source /path/to/base_util.sh +# +# # Now you can use the utility functions and variables in this script +# DEBUG_MODE=1 # Turn on debug mode +# setd "my_var" "Hello, World!" +# debug "a string value" +# debug "$TEMP_FILE_WITH_CONTENT_PATH" +# ``` + +# Function: setd +# Description: Sets a variable and outputs a debug message. +# Usage: setd "variable_name" "value" +setd() { + eval "$1=\"$2\"" # Set a variable with the given name and value + debug "$1" # Call the debug function to output the variable +} + +# Function: debug +# Description: Outputs debug information based on the DEBUG_MODE setting. +# Supports variables, strings, and file paths for file content. +# Usage: debug "variable_name" +debug() { + if [[ $DEBUG_MODE -eq 1 ]]; then + local var_name="$1" + local var_value="${!var_name}" # Indirect reference to get the value + if [[ -f "$var_value" ]]; then + echo "[DEBUG] $var_name: Content of file $var_value:" + cat "$var_value" + else + echo "[DEBUG] $var_name: $var_value" + fi + fi +} + +# Function: emit_output +# Description: Outputs a given environment variable either to GitHub output or stdout. +# Usage: emit_output "VAR_NAME" +emit_output() { + local var_name="$1" + local var_value="${!var_name}" # Indirect reference to get the value + + if [ -n "$GITHUB_OUTPUT" ]; then + echo "${var_name}=${var_value}" >> "$GITHUB_OUTPUT" + else + echo "${var_name}=${var_value}" + fi +} diff --git a/ci_scripts/update-images-operator-otel.sh b/ci_scripts/update-images-operator-otel.sh new file mode 100755 index 0000000000..00cc08afc7 --- /dev/null +++ b/ci_scripts/update-images-operator-otel.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# Purpose: Updates OpenTelemetry and Splunk images for auto-instrumentation. +# Notes: +# - OpenTelemetry images are centralized and may change with operator subchart updates. +# - Splunk images are decentralized and have a separate update mechanism and release cadence. 
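+# - The upstream versions.txt is expected to contain "key=value" lines; only keys
+#   prefixed with "autoinstrumentation-" are processed by this script. Illustrative
+#   sample lines (example values, not the fetched ones):
+#     autoinstrumentation-java=1.28.0
+#     autoinstrumentation-nodejs=0.41.1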
+# +# Example Usage: +# ./update-images-operator-otel.sh +# ./update-images-operator-otel.sh --debug + +# Include the base utility functions for setting and debugging variables +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "$SCRIPT_DIR/base_util.sh" + +# ---- Initialize Temporary Files ---- +# Create a temporary file to hold a subsection of the values.yaml file +setd "TEMP_VALUES_FILE" "$SCRIPT_DIR/temp_values_subsection.yaml" +# Create a temporary file to store version information +setd "TEMP_VERSIONS" "$SCRIPT_DIR/versions.txt" + +# ---- Operator Subchart Version Extraction ---- +# Extract the version of the opentelemetry-operator subchart from the main Chart.yaml +# This version helps us fetch the corresponding appVersion and image versions. +SUBCHART_VERSION=$(yq eval '.dependencies[] | select(.name == "opentelemetry-operator") | .version' "$CHART_FILE_PATH") +echo "Opentelemetry Operator Subchart Version: $SUBCHART_VERSION" + +# ---- Fetching App Version ---- +# Fetch the appVersion corresponding to the Operator subchart Version. +# This is extracted from the subchart's definition in the Chart.yaml file. +SUBCHART_URL="https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/opentelemetry-operator-$SUBCHART_VERSION/charts/opentelemetry-operator/Chart.yaml" +debug "Fetching: $SUBCHART_URL" +APP_VERSION=$(curl -s "$SUBCHART_URL" | grep 'appVersion:' | awk '{print $2}') +debug "Operator App Version: $APP_VERSION" + +# ---- Fetch Version Mapping ---- +# Fetch the version mappings from versions.txt for the fetched appVersion. +# This gives us a mapping of image keys to their corresponding version tags. +VERSIONS_URL="https://raw.githubusercontent.com/open-telemetry/opentelemetry-operator/v$APP_VERSION/versions.txt" +debug "Fetching: $VERSIONS_URL" +curl -s "$VERSIONS_URL" > "$TEMP_VERSIONS" +debug "Values from Operator OpenTelemetry versions.txt file containing image tags" +debug "$TEMP_VERSIONS" + +# ---- Extract Subsection for Update ---- +# Extract the content between "# Auto-instrumentation Libraries (Start)" and "# Auto-instrumentation Libraries (End)" +awk '/# Auto-instrumentation Libraries \(Start\)/,/# Auto-instrumentation Libraries \(End\)/' "$VALUES_FILE_PATH" | grep -v "# Auto-instrumentation Libraries " > "$TEMP_VALUES_FILE" + +# ---- Update Image Information ---- +while IFS='=' read -r IMAGE_KEY VERSION; do + NEED_UPDATE="${NEED_UPDATE:-0}" # Sets NEED_UPDATE to its current value or 0 if not set + if [[ "$IMAGE_KEY" =~ ^autoinstrumentation-.* ]]; then + # Upstream Operator Values + setd "INST_LIB_NAME" "${IMAGE_KEY#autoinstrumentation-}" + setd "REPOSITORY_UPSTREAM" "ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-${INST_LIB_NAME}" + setd "TAG_UPSTREAM" "${VERSION}" + + setd "REPOSITORY_LOCAL_PATH" "${INST_LIB_NAME}.repository" + setd "REPOSITORY_LOCAL" "$(yq eval ".${REPOSITORY_LOCAL_PATH}" "${TEMP_VALUES_FILE}")" + + if [[ -z "${REPOSITORY_LOCAL}" || "${REPOSITORY_LOCAL}" != *"splunk"* ]]; then + yq eval -i ".${REPOSITORY_LOCAL_PATH} = \"${REPOSITORY_UPSTREAM}\"" "${TEMP_VALUES_FILE}" + + setd "TAG_LOCAL_PATH" "${INST_LIB_NAME}.tag" + setd "TAG_LOCAL" "$(yq eval ".${TAG_LOCAL_PATH}" "${TEMP_VALUES_FILE}")" + if [[ -z "${TAG_LOCAL}" || "${TAG_LOCAL}" == "null" || "${TAG_LOCAL}" != "$TAG_UPSTREAM" ]]; then + debug "Upserting value for ${REPOSITORY_LOCAL}:${TAG_LOCAL}" + yq eval -i ".${TAG_LOCAL_PATH} = \"${TAG_UPSTREAM}\"" "${TEMP_VALUES_FILE}" + setd "NEED_UPDATE" 1 + else + debug "Retaining existing value for 
${REPOSITORY_LOCAL}:${TAG_LOCAL}" + fi + else + # Splunk instrumentation libraries are updated in a different workflow. + debug "Skipping updating ${REPOSITORY_LOCAL}:${TAG_LOCAL}" + fi + fi +done < "${TEMP_VERSIONS}" + +# Emit the NEED_UPDATE variable to either GitHub output or stdout +emit_output "NEED_UPDATE" + +# Merge the updated subsection back into values.yaml +# This approach specifically updates only the subsection between the start and end tokens. +# By doing so, we avoid reformatting the entire file, thus preserving the original structure and comments. +awk ' + !p && !/# Auto-instrumentation Libraries \(Start\)/ && !/# Auto-instrumentation Libraries \(End\)/ { print $0; next } + /# Auto-instrumentation Libraries \(Start\)/ {p=1; print $0; next} + /# Auto-instrumentation Libraries \(End\)/ {p=0; while((getline line < "'$TEMP_VALUES_FILE'") > 0) printf " %s\n", line; print $0; next} +' "$VALUES_FILE_PATH" > "${VALUES_FILE_PATH}.updated" + +# Replace the original values.yaml with the updated version +mv "${VALUES_FILE_PATH}.updated" "$VALUES_FILE_PATH" +# Cleanup temporary files +rm "$TEMP_VALUES_FILE" +rm "$TEMP_VERSIONS" + +echo "Image update process completed successfully!" +exit 0 diff --git a/ci_scripts/update-images-operator-splunk.sh b/ci_scripts/update-images-operator-splunk.sh new file mode 100755 index 0000000000..d5a4e4f721 --- /dev/null +++ b/ci_scripts/update-images-operator-splunk.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Purpose: Updates Splunk images for auto-instrumentation. +# Notes: +# - This script updates the instrumentation libraries from Splunk's repositories. +# - This script will always pull the latest version of a specific Splunk instrumentation library. +# - OpenTelemetry images are updated differently and are not handled by this script. +# Parameters: +# 1: Name of the instrumentation library (mandatory) +# --debug: Enable debug mode (optional) +# +# Example Usage: +# ./update-images-operator-splunk.sh java +# ./update-images-operator-splunk.sh nodejs --debug + +# Include the base utility functions for setting and debugging variables +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "$SCRIPT_DIR/base_util.sh" + +# ---- Validate Input Arguments ---- +# Check for command-line arguments +if [ "$#" -eq 0 ]; then + echo "Error: No arguments provided." 
+ echo "Usage: $0 [--debug]" + exit 1 +fi + +# ---- Initialize Variables ---- +# Set the instrumentation library name +setd "INST_LIB_NAME" "$1" + +# Set repository-related variables +setd "REPO" "ghcr.io/${OWNER}/splunk-otel-${INST_LIB_NAME}/splunk-otel-${INST_LIB_NAME}" +setd "REPOSITORY_LOCAL_PATH" "operator.instrumentation.spec.${INST_LIB_NAME}.repository" +setd "REPOSITORY_LOCAL" "$(yq eval ".${REPOSITORY_LOCAL_PATH}" "${VALUES_FILE_PATH}")" +setd "TAG_LOCAL_PATH" "operator.instrumentation.spec.${INST_LIB_NAME}.tag" +setd "TAG_LOCAL" "$(yq eval ".${TAG_LOCAL_PATH}" "${VALUES_FILE_PATH}")" + +# ---- Fetch Latest Version ---- +# Fetch the latest version from GitHub +setd "LATEST_API" "https://api.github.com/repos/${OWNER}/splunk-otel-${INST_LIB_NAME}/releases/latest" +setd "LATEST_API_CURL" "curl -L -qs -H 'Accept: application/vnd.github+json' \"$LATEST_API\" | jq -r .tag_name" +setd "TAG_UPSTREAM" "$(eval $LATEST_API_CURL)" + +# ---- Display Version Information ---- +# Display current and latest versions +echo "${REPOSITORY_LOCAL} -> Local tag: ${TAG_LOCAL}, Latest tag: $TAG_UPSTREAM" + +# ---- Update Version Information ---- +# If needed, update the tag version in values.yaml +setd "NEED_UPDATE" "${NEED_UPDATE:-0}" # Sets NEED_UPDATE to its current value or 0 if not set +if [ "$TAG_UPSTREAM" == "$TAG_LOCAL" ]; then + echo "We are already up to date. Nothing else to do." +elif [[ -z "$TAG_LOCAL" || "$TAG_LOCAL" == "null" || "$TAG_LOCAL" != "$TAG_UPSTREAM" ]]; then + debug "Upserting value for ${REPOSITORY_LOCAL}:${TAG_LOCAL}" + yq eval -i ".${TAG_LOCAL_PATH} = \"$TAG_UPSTREAM\"" "${VALUES_FILE_PATH}" + setd "NEED_UPDATE" 1 # Setting NEED_UPDATE to 1 as an update is required +fi + +# Emit the NEED_UPDATE variable to either GitHub output or stdout +emit_output "NEED_UPDATE" + +echo "Image update process completed successfully!" +exit 0 diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml index 8f736e0283..d571f9d10c 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml @@ -17,25 +17,53 @@ metadata: heritage: Helm app.kubernetes.io/component: otel-operator spec: - dotnet: - env: - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(SPLUNK_OTEL_AGENT):4318 - env: + exporter: + endpoint: http://$(SPLUNK_OTEL_AGENT):4317 + propagators: + - tracecontext + - baggage + - b3 + env: - name: SPLUNK_OTEL_AGENT valueFrom: fieldRef: apiVersion: v1 fieldPath: status.hostIP - exporter: - endpoint: http://$(SPLUNK_OTEL_AGENT):4317 - java: - image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java:v1.28.0 - propagators: - - tracecontext - - baggage - - b3 - python: - env: + apache-httpd: + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd/1.0.3 + env: + - name: OTEL_RESOURCE_ATTRIBUTES + value: splunk.zc.method=autoinstrumentation-apache-httpd:1.0.3 + dotnet: + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet/1.0.0-rc.2 + env: + - name: OTEL_RESOURCE_ATTRIBUTES + value: splunk.zc.method=autoinstrumentation-dotnet:1.0.0-rc.2 + # dotnet auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317. 
+ # See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(SPLUNK_OTEL_AGENT):4318 + go: + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go/v0.2.2-alpha + env: + - name: OTEL_RESOURCE_ATTRIBUTES + value: splunk.zc.method=autoinstrumentation-go:v0.2.2-alpha + java: + image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java/v1.27.0 + env: + - name: OTEL_RESOURCE_ATTRIBUTES + value: splunk.zc.method=splunk-otel-java:v1.27.0 + nodejs: + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs/0.41.1 + env: + - name: OTEL_RESOURCE_ATTRIBUTES + value: splunk.zc.method=autoinstrumentation-nodejs:0.41.1 + python: + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python/0.40b0 + env: + - name: OTEL_RESOURCE_ATTRIBUTES + value: splunk.zc.method=autoinstrumentation-python:0.40b0 + # python auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317. + # See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection - name: OTEL_EXPORTER_OTLP_ENDPOINT value: http://$(SPLUNK_OTEL_AGENT):4318 diff --git a/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl b/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl index c9624376e1..635cd8b879 100644 --- a/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl +++ b/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl @@ -1,51 +1,170 @@ -{{- define "validation-rules" -}} -{{- $tracesEnabled := or (include "splunk-otel-collector.platformTracesEnabled" .) (include "splunk-otel-collector.o11yTracesEnabled" .) -}} -{{- $endpointOverridden := and .Values.operator.instrumentation.spec .Values.operator.instrumentation.spec.exporter .Values.operator.instrumentation.spec.exporter.endpoint (ne .Values.operator.instrumentation.spec.exporter.endpoint "") -}} -{{- if and .Values.operator.enabled $tracesEnabled (not $endpointOverridden) (not (default "" .Values.environment)) -}} - {{- fail "When operator.enabled=true, (splunkPlatform.tracesEnabled=true or splunkObservability.tracesEnabled=true), (agent.enabled=true or gateway.enabled=true), then environment must be a non-empty string" -}} +{{/* +Helper to ensure the correct usage of the Splunk OpenTelemetry Collector Operator. +- Checks for a valid endpoint for exporting telemetry data. +- Validates that the operator is configured correctly according to user input and default settings. +*/}} +{{- define "splunk-otel-collector.operator.validation-rules" -}} + {{- /* Check if traces are enabled either through the platform or through observability */ -}} + {{- $tracesEnabled := or + (include "splunk-otel-collector.platformTracesEnabled" .) + (include "splunk-otel-collector.o11yTracesEnabled" .) 
+ -}} + + {{- /* Check if the endpoint is overridden in the Helm values */ -}} + {{- $endpointOverridden := and + .Values.operator.instrumentation.spec + .Values.operator.instrumentation.spec.exporter + .Values.operator.instrumentation.spec.exporter.endpoint + (ne .Values.operator.instrumentation.spec.exporter.endpoint "") + -}} + + {{- /* Validate the configuration */ -}} + {{- if and + .Values.operator.enabled + $tracesEnabled + (not $endpointOverridden) + (not (default "" .Values.environment)) + -}} + {{- fail "When operator.enabled=true, (splunkPlatform.tracesEnabled=true or splunkObservability.tracesEnabled=true), (agent.enabled=true or gateway.enabled=true), then environment must be a non-empty string" -}} + {{- end -}} {{- end -}} + +{{/* +Helper to define an endpoint for exporting telemetry data related to auto-instrumentation. +- Determines the endpoint based on user-defined values or default agent/gateway settings. +- Order of precedence: User-defined > Agent endpoint > Gateway endpoint +*/}} +{{- define "splunk-otel-collector.operator.instrumentation-exporter-endpoint" -}} + {{- /* Initialize endpoint variable */ -}} + {{- $endpoint := "" -}} + + {{- /* Use the user-defined endpoint if specified in the Helm values */ -}} + {{- if and + .Values.operator.instrumentation.spec + .Values.operator.instrumentation.spec.exporter + .Values.operator.instrumentation.spec.exporter.endpoint + (ne .Values.operator.instrumentation.spec.exporter.endpoint "") + }} + {{- $endpoint = .Values.operator.instrumentation.spec.exporter.endpoint -}} + {{- /* Use the agent endpoint if the agent is enabled */ -}} + {{- else if .Values.agent.enabled -}} + {{- $endpoint = "http://$(SPLUNK_OTEL_AGENT):4317" -}} + {{- /* Use the gateway endpoint if the gateway is enabled */ -}} + {{- else if .Values.gateway.enabled -}} + {{- $endpoint = printf "http://%s:4317" (include "splunk-otel-collector.fullname" .) -}} + {{- /* Fail if no valid endpoint is available */ -}} + {{- else -}} + {{- fail "When operator.enabled=true, (splunkPlatform.tracesEnabled=true or splunkObservability.tracesEnabled=true), either agent.enabled=true, gateway.enabled=true, or .Values.operator.instrumentation.spec.exporter.endpoint must be set" -}} + {{- end -}} + + {{- /* Return the determined endpoint */ -}} + {{- printf "%s" $endpoint -}} {{- end -}} -{{- define "splunk-otel-collector.operator.instrumentation.exporter.endpoint" -}} -{{- if and .Values.operator.instrumentation.spec .Values.operator.instrumentation.spec.exporter .Values.operator.instrumentation.spec.exporter.endpoint (ne .Values.operator.instrumentation.spec.exporter.endpoint "") }} - {{ .Values.operator.instrumentation.spec.exporter.endpoint }} -{{- else if .Values.agent.enabled }} - http://$(SPLUNK_OTEL_AGENT):4317 -{{- else if .Values.gateway.enabled }} - http://{{ include "splunk-otel-collector.fullname" . }}:4317 -{{- else -}} - {{- fail "When operator.enabled=true, (splunkPlatform.tracesEnabled=true or splunkObservability.tracesEnabled=true), either agent.enabled=true, gateway.enabled=true, or .Values.operator.instrumentation.spec.exporter.endpoint must be set" -}} -{{- end }} -{{- end }} +{{/* +Helper to define entries for instrumentation libraries. +- Iterates over user-defined and default configuration settings for each library. +- Generates a YAML configuration block for each library, containing: + - The library name. + - The image repository and tag. 
+ - Environment variables, including special handling for 'OTEL_RESOURCE_ATTRIBUTES' and 'OTEL_EXPORTER_OTLP_ENDPOINT'. +*/}} +{{- define "splunk-otel-collector.operator.instrumentation-libraries" -}} + {{- /* Store the endpoint in a variable to avoid context changes in nested loops. */ -}} + {{- /* Helm template loops change the context, making direct access to variables in parent scopes unreliable. */ -}} + {{- $endpoint := include "splunk-otel-collector.operator.instrumentation-exporter-endpoint" $ -}} -{{- define "splunk-otel-collector.operator.instrumentation.spec-base" -}} -exporter: - endpoint: {{- include "splunk-otel-collector.operator.instrumentation.exporter.endpoint" . | nindent 4 }} -env: - {{- if .Values.agent.enabled }} - - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP + {{- /* Iterate over each specified instrumentation library */ -}} + {{- if .Values.operator.instrumentation.spec -}} + {{- range $key, $value := .Values.operator.instrumentation.spec -}} + + {{- /* Check for required fields to determine if it is an instrumentation library */ -}} + {{- if and $value.repository $value.tag -}} + + {{- /* Generate YAML keys for each instrumentation library */ -}} + {{- printf "%s:" $key | indent 2 -}} + {{- printf "\n" -}} + + {{- /* Generate YAML for the image field */ -}} + {{- printf "image: %s/%s" $value.repository $value.tag | indent 4 -}} + {{- printf "\n" -}} + + {{- /* Output environment variables for the instrumentation library */ -}} + {{- printf "env:" | indent 4 -}} + {{- include "splunk-otel-collector.operator.extract-instrumentation-env" (dict "endpoint" $endpoint "key" $key "env" $value.env "repository" $value.repository "tag" $value.tag) -}} + + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Helper to convert a list of dictionaries into a list of keys. +- Iterates through a list of dictionaries and collects the 'name' field from each. +- Returns a list of these 'name' keys. +*/}} +{{- define "splunk-otel-collector.operator.extract-name-keys-from-dict-list" -}} + {{- /* Initialize variables */ -}} + {{- $listOfDicts := . -}} + {{- $keyList := list -}} + + {{- /* Collect 'name' field from each dictionary */ -}} + {{- range $listOfDicts -}} + {{- $keyList = append $keyList .name -}} + {{- end -}} + + {{- /* Return the list of 'name' keys */ -}} + {{- $keyList -}} +{{- end -}} + +{{/* +Helper for generating environment variables for each instrumentation library. +- Prioritizes user-supplied environment variables over defaults. +- For OTEL_RESOURCE_ATTRIBUTES, combines default attributes with any user-supplied values. +- For OTEL_EXPORTER_OTLP_ENDPOINT, applies special case values based on the library ('dotnet', 'python'), but user-supplied values will override these. 
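+- Illustrative rendered result for the "java" library (values mirror the rendered example under
+  examples/enable-operator-and-auto-instrumentation, not hard-coded defaults):
+      env:
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: splunk.zc.method=splunk-otel-java:v1.27.0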
+*/}} +{{- define "splunk-otel-collector.operator.extract-instrumentation-env" }} + {{- /* Initialize Splunk default Otel resource attribute; always included */ -}} + {{- $imageShortName := printf "%s:%s" (splitList "/" .repository | last) .tag -}} + {{- $otelResourceAttributes := printf "splunk.zc.method=%s" $imageShortName }} + + {{- /* Loop through user-supplied environment variables */ -}} + {{- range $env := .env }} + {{- if eq $env.name "OTEL_RESOURCE_ATTRIBUTES" }} + {{- $otelResourceAttributes = printf "%s,%s" $env.value $otelResourceAttributes }} + {{- else }} + {{- printf "- name: %s" $env.name | nindent 6 -}} + {{- printf " value: %s" $env.value | nindent 6 -}} + {{- end }} {{- end }} - {{- if .Values.splunkObservability.profilingEnabled }} - - name: SPLUNK_PROFILER_ENABLED - value: "true" - - name: SPLUNK_PROFILER_MEMORY_ENABLED - value: "true" + + {{- /* Output OTEL_RESOURCE_ATTRIBUTES with merged values */ -}} + {{- printf "- name: %s" "OTEL_RESOURCE_ATTRIBUTES" | nindent 6 -}} + {{- printf " value: %s" $otelResourceAttributes | nindent 6 -}} + {{- printf "\n" -}} + + {{- /* Handle custom or default exporter endpoint */ -}} + {{- $customOtelExporterEndpoint := "" }} + {{- if or (eq .key "dotnet") (eq .key "python") }} + {{- $customOtelExporterEndpoint = .endpoint | replace ":4317" ":4318" }} + {{- end }} + {{- if .env }} + {{- range $env := .env }} + {{- if eq $env.name "OTEL_EXPORTER_OTLP_ENDPOINT" }} + {{- $customOtelExporterEndpoint = $env.value }} + {{- end }} + {{- end }} + {{- end }} + + {{- /* Output final OTEL_EXPORTER_OTLP_ENDPOINT, if applicable based on input conditions */ -}} + {{- if $customOtelExporterEndpoint }} + {{- if contains "4318" $customOtelExporterEndpoint }} + {{- printf "# %s auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317." .key | indent 6 -}} + {{- printf "\n" -}} + {{- printf "# See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection" | indent 6 -}} + {{- end }} + {{- printf "- name: %s" "OTEL_EXPORTER_OTLP_ENDPOINT" | nindent 6 -}} + {{- printf " value: %s" $customOtelExporterEndpoint | nindent 6 -}} + {{- printf "\n" -}} {{- end }} -{{- if include "splunk-otel-collector.operator.instrumentation.exporter.endpoint" . | toString | hasSuffix ":4317" }} -# Required if endpoint is set to 4317. -# Python and dotnet auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317. -# # See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection -python: - env: - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: {{- include "splunk-otel-collector.operator.instrumentation.exporter.endpoint" . | replace ":4317" ":4318" | nindent 6 }} -dotnet: - env: - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: {{- include "splunk-otel-collector.operator.instrumentation.exporter.endpoint" . | replace ":4317" ":4318" | nindent 6 }} -{{- end }} {{- end }} diff --git a/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml b/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml index 8aeee52d39..537647fa6d 100644 --- a/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml +++ b/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml @@ -1,5 +1,5 @@ {{- if .Values.operator.enabled }} -{{- include "validation-rules" . -}} +{{- include "splunk-otel-collector.operator.validation-rules" . 
-}} apiVersion: opentelemetry.io/v1alpha1 kind: Instrumentation metadata: @@ -13,6 +13,35 @@ metadata: heritage: {{ .Release.Service }} app.kubernetes.io/component: otel-operator spec: -{{- $spec := include "splunk-otel-collector.operator.instrumentation.spec-base" . | fromYaml }} -{{- .Values.operator.instrumentation.spec | mustMergeOverwrite $spec | toYaml | nindent 4 }} + exporter: + endpoint: {{ include "splunk-otel-collector.operator.instrumentation-exporter-endpoint" . }} + propagators: + - tracecontext + - baggage + - b3 + {{- if .Values.operator.instrumentation.spec.sampler }} + {{ include .Values.operator.instrumentation.spec.sampler }} + {{- end }} + env: + {{- if .Values.operator.instrumentation.spec.env }} + {{- include .Values.operator.instrumentation.spec.env }} + {{- end }} + {{- if .Values.splunkObservability.profilingEnabled }} + {{- if not hasKey (include "splunk-otel-collector.operator.extract-name-keys-from-dict-list" .Values.operator.instrumentation.spec.env) "SPLUNK_PROFILER_ENABLED" }} + - name: SPLUNK_PROFILER_ENABLED + value: "true" + {{- end }} + {{- if not hasKey (include "splunk-otel-collector.operator.extract-name-keys-from-dict-list" .Values.operator.instrumentation.spec.env) "SPLUNK_PROFILER_MEMORY_ENABLED" }} + - name: SPLUNK_PROFILER_MEMORY_ENABLED + value: "true" + {{- end }} + {{- end }} + {{- if .Values.agent.enabled }} + - name: SPLUNK_OTEL_AGENT + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{- end }} +{{ include "splunk-otel-collector.operator.instrumentation-libraries" . }} {{- end }} diff --git a/helm-charts/splunk-otel-collector/values.yaml b/helm-charts/splunk-otel-collector/values.yaml index c5d65aa7b8..5b791aeb75 100644 --- a/helm-charts/splunk-otel-collector/values.yaml +++ b/helm-charts/splunk-otel-collector/values.yaml @@ -1401,10 +1401,6 @@ operator: # must be overridden here. # exporter: # endpoint: http://$(SPLUNK_OTEL_AGENT):4317 - propagators: - - tracecontext - - baggage - - b3 # Optional "sampler" parameter for enabling trace sampling, see: https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/#otel_traces_sampler # sampler: # type: traceidratio @@ -1412,25 +1408,38 @@ operator: # Optional "environment variable" parameters that can configure all instrumentation libraries. # If splunkObservability.profilingEnabled=true, environment variables enabling profiling will be added automatically. # env: + # Auto-instrumentation Libraries (Start) + # Below are configurations for the instrumentation libraries utilized in Auto-instrumentation. + # Highlights: + # - Maturity varies among libraries (e.g., Java is more mature than Go). Check each library's stability here: https://opentelemetry.io/docs/instrumentation/#status-and-releases + # - Some libraries may be enabled by default. The current status can be checked here: https://github.com/open-telemetry/opentelemetry-operator#controlling-instrumentation-capabilities + # - Splunk provides best-effort support for native OpenTelemetry libraries, while offering full support for its own distributions. + # Each library supports the following fields: + # - repository: Specifies the Docker image repository. + # - tag: Indicates the Docker image tag. + # - env: (Optional) Allows you to add any additional environment variables. 
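+  # For example (commented out, illustrative only), an optional env entry can be added to any library;
+  # a user-supplied OTEL_RESOURCE_ATTRIBUTES value is combined with the chart-generated attributes:
+  #   java:
+  #     repository: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java
+  #     tag: v1.27.0
+  #     env:
+  #       - name: OTEL_RESOURCE_ATTRIBUTES
+  #         value: deployment.environment=dev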
java: - image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java:v1.28.0 - # OpenTelemetry Instrumentation Libraries - # For default version values, see: https://github:com/open-telemetry/opentelemetry-operator/releases - # apacheHttpd: - # image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd:X.X.X - # dotnet: - # Port 4318 is used for dotnet instead of 4317 if the default endpoint value is not overridden, see: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection - # image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:X.X.X - # go: - # image: ghcr.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:X.X.X - # nodejs: - # image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:X.X.X - # python: - # Port 4318 is used for python instead of 4317 if the default endpoint value is not overridden, see: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection - # image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:X.X.X + repository: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java + tag: v1.27.0 + nodejs: + repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs + tag: 0.41.1 + go: + repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go + tag: v0.2.2-alpha + apache-httpd: + repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd + tag: 1.0.3 + python: + repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python + tag: 0.40b0 + dotnet: + repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet + tag: 1.0.0-rc.2 + # Auto-instrumentation Libraries (End) admissionWebhooks: certManager: - # The cert and issuer are annoted so they are instantiated after the cert-manager CRDs are installed. + # The cert and issuer are annotated so they are instantiated after the cert-manager CRDs are installed. certificateAnnotations: "helm.sh/hook": post-install,post-upgrade "helm.sh/hook-weight": "1" From 55b951cce8c87623c646ba98d4f28fd89911e25a Mon Sep 17 00:00:00 2001 From: jvoravong Date: Mon, 11 Sep 2023 14:20:08 -0600 Subject: [PATCH 2/5] Add documentation to NOTES.txt --- helm-charts/splunk-otel-collector/templates/NOTES.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/helm-charts/splunk-otel-collector/templates/NOTES.txt b/helm-charts/splunk-otel-collector/templates/NOTES.txt index 7aeab77315..92ff19ea2a 100644 --- a/helm-charts/splunk-otel-collector/templates/NOTES.txt +++ b/helm-charts/splunk-otel-collector/templates/NOTES.txt @@ -85,3 +85,9 @@ Splunk Network Explorer is installed and configured. Please ensure that "logsEngine" parameter is explicitly set to "fluentd" or "otel" to avoid unexpected changes during upgrade. More details: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#logs-collection {{ end }} +{{- if .Values.operator.enabled }} +[INFO] You've enabled the operator's auto-instrumentation feature (operator.enabled=true), currently considered ALPHA. + - Instrumentation library maturity varies (e.g., Java is more mature than Go). For library stability, visit: https://opentelemetry.io/docs/instrumentation/#status-and-releases + - Some libraries may be enabled by default. 
For current status, see: https://github.com/open-telemetry/opentelemetry-operator#controlling-instrumentation-capabilities + - Splunk provides best-effort support for native OpenTelemetry libraries, and full support for Splunk library distributions. For used libraries, refer to the values.yaml under "operator.instrumentation.spec". +{{- end }} From 6a219dd2aa26d9e93a332140ae15e5d2cc105fdb Mon Sep 17 00:00:00 2001 From: jvoravong Date: Mon, 11 Sep 2023 14:20:21 -0600 Subject: [PATCH 3/5] Add an entry to CHANGELOG.md and UPGRADING.md --- CHANGELOG.md | 4 ++++ UPGRADING.md | 31 +++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 568997b088..18da71493d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Changed - Update Splunk Fluend HEC docker image to v1.3.3 [#924](https://github.com/signalfx/splunk-otel-collector-chart/pull/924) +- Add ability to update and track operator auto-instrumentation images [#917](https://github.com/signalfx/splunk-otel-collector-chart/pull/917) + - [BREAKING CHANGE] Refactored auto-instrumentation image definition from operator.instrumentation.spec.{library}.image + to operator.instrumentation.spec.{library}.repository and operator.instrumentation.spec.{library}.tag. + See [upgrade guidelines](https://github.com/signalfx/splunk-otel-collector-chart/blob/main/UPGRADING.md#0840-0850) ## [0.84.0] - 2023-09-11 diff --git a/UPGRADING.md b/UPGRADING.md index ce9e5d318f..5275410f46 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -1,5 +1,36 @@ # Upgrade guidelines +## 0.84.0 to 0.85.0 + +The format for defining auto-instrumentation images has been refactored. Previously, the image was +defined using the `operator.instrumentation.spec.{library}.image` format. This has been changed to +separate the repository and tag into two distinct fields: `operator.instrumentation.spec.{library}.repository` +and `operator.instrumentation.spec.{library}.tag`. + +If you were defining a custom image under `operator.instrumentation.spec.{library}.image`, update +your `values.yaml` to accommodate this change. + +- Before: + +```yaml +operator: + instrumentation: + spec: + java: + image: ghcr.io/custom-owner/splunk-otel-java/custom-splunk-otel-java:v1.27.0 +``` + +- After: + +```yaml +operator: + instrumentation: + spec: + java: + repository: ghcr.io/custom-owner/splunk-otel-java/custom-splunk-otel-java + tag: v1.27.0 +``` + ## 0.67.0 to 0.68.0 There is a new receiver: [Kubernetes Objects Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver) that can pull or watch any object from Kubernetes API server. 
From f1a2f5eb2d27dcea4e64886dfbb207d3375d8f7e Mon Sep 17 00:00:00 2001 From: jvoravong Date: Tue, 19 Sep 2023 09:18:12 -0600 Subject: [PATCH 4/5] patch - fix merge conflicts, add support special instlib naming case ("apache-httpd" "apacheHttpd") --- .../rendered_manifests/operator/instrumentation.yaml | 2 +- .../templates/operator/_helpers.tpl | 12 ++++++++---- helm-charts/splunk-otel-collector/values.yaml | 2 +- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml index d571f9d10c..6437f7ddad 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml @@ -29,7 +29,7 @@ spec: fieldRef: apiVersion: v1 fieldPath: status.hostIP - apache-httpd: + apacheHttpd: image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd/1.0.3 env: - name: OTEL_RESOURCE_ATTRIBUTES diff --git a/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl b/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl index 635cd8b879..758e72b591 100644 --- a/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl +++ b/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl @@ -73,16 +73,20 @@ Helper to define entries for instrumentation libraries. {{- /* Store the endpoint in a variable to avoid context changes in nested loops. */ -}} {{- /* Helm template loops change the context, making direct access to variables in parent scopes unreliable. */ -}} {{- $endpoint := include "splunk-otel-collector.operator.instrumentation-exporter-endpoint" $ -}} + {{- /* Define a map (versions.txt name -> Instrumentation spec name) for instrumentation library names */ -}} + {{- /* This is a simple workaround to accommodate one unique case that should be removed in the future */ -}} + {{- $instLibAliases := dict "apache-httpd" "apacheHttpd" -}} {{- /* Iterate over each specified instrumentation library */ -}} {{- if .Values.operator.instrumentation.spec -}} {{- range $key, $value := .Values.operator.instrumentation.spec -}} + {{- $instLibName := get $instLibAliases $key | default $key -}} {{- /* Check for required fields to determine if it is an instrumentation library */ -}} {{- if and $value.repository $value.tag -}} {{- /* Generate YAML keys for each instrumentation library */ -}} - {{- printf "%s:" $key | indent 2 -}} + {{- printf "%s:" $instLibName | indent 2 -}} {{- printf "\n" -}} {{- /* Generate YAML for the image field */ -}} @@ -91,7 +95,7 @@ Helper to define entries for instrumentation libraries. {{- /* Output environment variables for the instrumentation library */ -}} {{- printf "env:" | indent 4 -}} - {{- include "splunk-otel-collector.operator.extract-instrumentation-env" (dict "endpoint" $endpoint "key" $key "env" $value.env "repository" $value.repository "tag" $value.tag) -}} + {{- include "splunk-otel-collector.operator.extract-instrumentation-env" (dict "endpoint" $endpoint "instLibName" $instLibName "env" $value.env "repository" $value.repository "tag" $value.tag) -}} {{- end -}} {{- end -}} @@ -145,7 +149,7 @@ Helper for generating environment variables for each instrumentation library. 
{{- /* Handle custom or default exporter endpoint */ -}} {{- $customOtelExporterEndpoint := "" }} - {{- if or (eq .key "dotnet") (eq .key "python") }} + {{- if or (eq .instLibName "dotnet") (eq .instLibName "python") }} {{- $customOtelExporterEndpoint = .endpoint | replace ":4317" ":4318" }} {{- end }} {{- if .env }} @@ -159,7 +163,7 @@ Helper for generating environment variables for each instrumentation library. {{- /* Output final OTEL_EXPORTER_OTLP_ENDPOINT, if applicable based on input conditions */ -}} {{- if $customOtelExporterEndpoint }} {{- if contains "4318" $customOtelExporterEndpoint }} - {{- printf "# %s auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317." .key | indent 6 -}} + {{- printf "# %s auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317." .instLibName | indent 6 -}} {{- printf "\n" -}} {{- printf "# See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection" | indent 6 -}} {{- end }} diff --git a/helm-charts/splunk-otel-collector/values.yaml b/helm-charts/splunk-otel-collector/values.yaml index 5b791aeb75..1402f9a5d8 100644 --- a/helm-charts/splunk-otel-collector/values.yaml +++ b/helm-charts/splunk-otel-collector/values.yaml @@ -1439,7 +1439,7 @@ operator: # Auto-instrumentation Libraries (End) admissionWebhooks: certManager: - # The cert and issuer are annotated so they are instantiated after the cert-manager CRDs are installed. + # Annotate the certificate and issuer to ensure they are created after the cert-manager CRDs have been installed. certificateAnnotations: "helm.sh/hook": post-install,post-upgrade "helm.sh/hook-weight": "1" From b8565ed6d0d66726b843a5f14fbb1fa5d43248a5 Mon Sep 17 00:00:00 2001 From: jvoravong Date: Tue, 19 Sep 2023 15:05:17 -0600 Subject: [PATCH 5/5] patch - add small final updates/fixes with latest testing --- .../README.md | 136 ++++++++++-------- .../operator/instrumentation.yaml | 12 +- .../spring-petclinic/spring-petclinic.yaml | 50 ++++--- .../update-demo.sh | 41 ++++-- .../templates/operator/_helpers.tpl | 2 +- 5 files changed, 145 insertions(+), 96 deletions(-) diff --git a/examples/enable-operator-and-auto-instrumentation/README.md b/examples/enable-operator-and-auto-instrumentation/README.md index f66a6066c5..2c322fac12 100644 --- a/examples/enable-operator-and-auto-instrumentation/README.md +++ b/examples/enable-operator-and-auto-instrumentation/README.md @@ -12,8 +12,7 @@ If you have your own Java application you want to instrument, you can still use to instrument your application. ```bash -kubectl create namespace spring-petclinic -curl https://raw.githubusercontent.com/signalfx/splunk-otel-collector-chart/main/examples/enable-operator-and-auto-instrumentation/spring-petclinic/spring-petclinic.yaml | kubectl apply -n spring-petclinic -f - +curl https://raw.githubusercontent.com/signalfx/splunk-otel-collector-chart/main/examples/enable-operator-and-auto-instrumentation/spring-petclinic/spring-petclinic.yaml | kubectl apply -f - ``` ### 2. Complete the steps outlined in [Getting started with auto-instrumentation](../../docs/auto-instrumentation-install.md#steps-for-setting-up-auto-instrumentation) @@ -37,88 +36,107 @@ helm install splunk-otel-collector -f ./my_values.yaml --set operator.enabled=tr
Expand for kubectl commands to run and output -``` +```bash kubectl get pods -# NAME READY STATUS RESTARTS AGE -# splunk-otel-collector-agent-9ccgn 2/2 Running 0 3m -# splunk-otel-collector-agent-ft4xc 2/2 Running 0 3m -# splunk-otel-collector-k8s-cluster-receiver-56f7c9cf5b-mgsbj 1/1 Running 0 3m -# splunk-otel-collector-operator-6dffc898df-5jjkp 2/2 Running 0 3m +# NAME READY STATUS RESTARTS AGE +# splunk-otel-collector-agent-2mtfn 2/2 Running 0 5m +# splunk-otel-collector-agent-k4gc8 2/2 Running 0 5m +# splunk-otel-collector-agent-wjt98 2/2 Running 0 5m +# splunk-otel-collector-certmanager-69b98cc84d-2vzl7 1/1 Running 0 5m +# splunk-otel-collector-certmanager-cainjector-76db6dcbbf-4625c 1/1 Running 0 5m +# splunk-otel-collector-certmanager-webhook-bc68cd487-dctrf 1/1 Running 0 5m +# splunk-otel-collector-k8s-cluster-receiver-8449bfdc8-hhbvz 1/1 Running 0 5m +# splunk-otel-collector-operator-754c9d78f8-9ztwg 2/2 Running 0 5m +# spring-petclinic-admin-server-55fb6cfc64-gwj8q 1/1 Running 0 5m +# spring-petclinic-api-gateway-7dfb6f4c88-zstv9 1/1 Running 0 5m +# spring-petclinic-config-server-55c5d7d69b-hhdn8 1/1 Running 0 5m +# spring-petclinic-customers-service-65b64444bc-n7b4p 1/1 Running 0 5m +# spring-petclinic-discovery-server-78fbb87b65-tw98n 1/1 Running 0 5m +# spring-petclinic-vets-service-75bc75b8d-sx7gd 1/1 Running 0 5m +# spring-petclinic-visits-service-7568c748f5-c5jmz 1/1 Running 0 5m kubectl get mutatingwebhookconfiguration.admissionregistration.k8s.io # NAME WEBHOOKS AGE # splunk-otel-collector-certmanager-webhooh 1 8m # splunk-otel-collector-operator-mutation 3 2m -kubectl get pods -n spring-petclinic -# NAME READY STATUS RESTARTS AGE -# admin-server-75d7f4b777-kwq74 1/1 Running 0 2m -# api-gateway-649cc9c68c-9g85j 1/1 Running 0 2m -# config-server-6f7dc87c5f-l8wf5 1/1 Running 0 2m -# customers-service-64c4f875d-m5m64 1/1 Running 0 2m -# discovery-server-65b6d569d6-pf9t6 1/1 Running 0 2m -# vets-service-89b55685c-m9pp5 1/1 Running 0 2m -# visits-service-9689c7b96-k4nm2 1/1 Running 0 2m - -kubectl get otelinst -n spring-petclinic -# NAME AGE ENDPOINT -# splunk-instrumentation-java 3m http://$(SPLUNK_OTEL_AGENT):4317 +kubectl get otelinst +# NAME AGE ENDPOINT +# splunk-otel-collector 5m http://$(SPLUNK_OTEL_AGENT):4317 ```
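To inspect the full Instrumentation spec the operator will inject (including the per-language images and environment variables rendered from values.yaml), you can also dump the object; the object name used below is taken from the `kubectl get otelinst` output above:

```bash
kubectl get otelinst splunk-otel-collector -o yaml
```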
-#### 2.3 Instrument application by setting an annotation +#### 2.3 Instrument Application by Setting an Annotation -The required annotation can exist on pods or namespaces. Depending on the variety of applications you are instrumenting, -you may want to use either of these options. We show how to annotate pods and namespaces below. +Depending on the variety of applications you are instrumenting, you may want to use different scopes for annotations. This step shows how to annotate namespaces and individual pods. -If all the applications within the target namespace are of one type (like only Java), then annotating the namespace -would be appropriate. +**Recommended Method** -
-Expand for commands to run to add the annotation to a namespace +This is the most recommended way if your applications are of multiple types (like Java, Python, NodeJS) and you want to control annotations at the pod level. +```bash +# Patch all the deployments (labeled with 'app.kubernetes.io/part-of=spring-petclinic) to add the inject annotation. +# This automatically causes pods to restart. +kubectl get deployments -l app.kubernetes.io/part-of=spring-petclinic -o name | xargs -I % kubectl patch % -p "{\"spec\": {\"template\":{\"metadata\":{\"annotations\":{\"instrumentation.opentelemetry.io/inject-java\":\"true\"}}}}}" ``` -# The pods existing in the spring-petclinic namespace will be instrumented after they are annotated and restarted by the user. -kubectl patch namespace spring-petclinic -p '{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}' -# In non-test environments, users should use commands like "kubectl rollout restart" to restart pods. -# In a test environment, one can quickly restart all the pods like this. -kubectl delete --all pods -n spring-petclinic +```bash +# To disable instrumentation, remove the annotation or set its value to 'false' +kubectl get deployments -l app.kubernetes.io/part-of=spring-petclinic -o name | xargs -I % kubectl patch % -p "{\"spec\": {\"template\":{\"metadata\":{\"annotations\":{\"instrumentation.opentelemetry.io/inject-java\":\"false\"}}}}}" -# If you need to disable instrumentation, remove the annotation or set the annotation value to false. -kubectl patch namespace spring-petclinic -p '{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}' ``` -If the applications within the target namespace are of several types (like Java, Python, NodeJS), then annotating the -pods would be appropriate. This example only contains Java applications, but we will still demonstrate how to -instrument pods by updating the deployments that created them. - -
+**Other Methods**
-Expand for commands to run to add the annotation to pods by updating the deployment +Expand for commands to run to add the annotation at other levels + +##### Namespace Annotation + +If all the applications within the target namespace are of one type, annotating the namespace is appropriate. +For example, if you have a namespace called `spring-petclinic` where only Java-based applications run, this could be useful. + +```bash +# Annotate the 'spring-petclinic' namespace +kubectl patch namespace spring-petclinic -p '{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}' +# To apply the changes, you may need to restart the existing pods +kubectl delete --all pods --namespace spring-petclinic +``` + +```bash +# To disable instrumentation, remove the annotation or set its value to 'false' +kubectl patch namespace spring-petclinic -p '{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}' +# To apply the changes, you may need to restart the existing pods +kubectl delete --all pods --namespace spring-petclinic +``` + +##### Deployment Templates + +Use this method if you need specific control over which deployments are instrumented. +```bash +# Patch all spring-petclinic deployments +kubectl patch deployment spring-petclinic-admin-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' +kubectl patch deployment spring-petclinic-api-gateway -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' +kubectl patch deployment spring-petclinic-config-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' +kubectl patch deployment spring-petclinic-customers-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' +kubectl patch deployment spring-petclinic-vets-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' +kubectl patch deployment spring-petclinic-discovery-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' +kubectl patch deployment spring-petclinic-visits-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' ``` -# The pods will be instrumented after the related deploymented is patched with the annotation. Patching a deployment automatically causes pods to restart. 
-kubectl patch deployment admin-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic -kubectl patch deployment api-gateway -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic -kubectl patch deployment config-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic -kubectl patch deployment customers-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic -kubectl patch deployment vets-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic -kubectl patch deployment discovery-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic -kubectl patch deployment visits-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"true"}}}} }' -n spring-petclinic - -# If you need to disable instrumentation, remove the annotation or set the annotation value to false. -kubectl patch deployment admin-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic -kubectl patch deployment api-gateway -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic -kubectl patch deployment config-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic -kubectl patch deployment customers-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic -kubectl patch deployment vets-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic -kubectl patch deployment discovery-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic -kubectl patch deployment visits-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' -n spring-petclinic +```bash +# To disable instrumentation, remove the annotation or set its value to 'false' +kubectl patch deployment spring-petclinic-admin-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' +kubectl patch deployment spring-petclinic-api-gateway -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' +kubectl patch deployment spring-petclinic-config-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' +kubectl patch deployment spring-petclinic-customers-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' +kubectl patch deployment spring-petclinic-vets-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }' +kubectl patch deployment 
spring-petclinic-discovery-server -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }'
+kubectl patch deployment spring-petclinic-visits-service -p '{"spec": {"template":{"metadata":{"annotations":{"instrumentation.opentelemetry.io/inject-java":"false"}}}} }'
```
+
You can verify instrumentation was successful on an individual pod with. Check that these bullet points are @@ -130,7 +148,7 @@ true for the instrumented pod using the command below. Expand for commands to run to verify instrumentation ``` -kubectl describe pod spring-petclinic-9d5bc5fff-5r5gr -n spring-petclinic +kubectl describe pod spring-petclinic-9d5bc5fff-5r5gr # Name: spring-petclinic-9d5bc5fff-5r5gr # Namespace: spring-petclinic # Annotations: instrumentation.opentelemetry.io/inject-java: true diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml index 6437f7ddad..4b43a4a68c 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml @@ -30,12 +30,12 @@ spec: apiVersion: v1 fieldPath: status.hostIP apacheHttpd: - image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd/1.0.3 + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd:1.0.3 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-apache-httpd:1.0.3 dotnet: - image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet/1.0.0-rc.2 + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-dotnet:1.0.0-rc.2 @@ -44,22 +44,22 @@ spec: - name: OTEL_EXPORTER_OTLP_ENDPOINT value: http://$(SPLUNK_OTEL_AGENT):4318 go: - image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go/v0.2.2-alpha + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go:v0.2.2-alpha env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-go:v0.2.2-alpha java: - image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java/v1.27.0 + image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java:v1.27.0 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=splunk-otel-java:v1.27.0 nodejs: - image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs/0.41.1 + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-nodejs:0.41.1 python: - image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python/0.40b0 + image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-python:0.40b0 diff --git a/examples/enable-operator-and-auto-instrumentation/spring-petclinic/spring-petclinic.yaml b/examples/enable-operator-and-auto-instrumentation/spring-petclinic/spring-petclinic.yaml index 6756c98d93..46d91dc255 100644 --- a/examples/enable-operator-and-auto-instrumentation/spring-petclinic/spring-petclinic.yaml +++ b/examples/enable-operator-and-auto-instrumentation/spring-petclinic/spring-petclinic.yaml @@ -5,6 +5,7 @@ metadata: creationTimestamp: null labels: io.kompose.service: admin-server + app.kubernetes.io/part-of: spring-petclinic name: admin-server spec: ports: @@ -15,7 +16,6 @@ spec: io.kompose.service: admin-server status: loadBalancer: {} - --- apiVersion: v1 kind: Service @@ -23,6 +23,7 @@ metadata: 
creationTimestamp: null labels: io.kompose.service: api-gateway + app.kubernetes.io/part-of: spring-petclinic name: api-gateway spec: ports: @@ -33,7 +34,6 @@ spec: io.kompose.service: api-gateway status: loadBalancer: {} - --- apiVersion: v1 kind: Service @@ -41,6 +41,7 @@ metadata: creationTimestamp: null labels: io.kompose.service: config-server + app.kubernetes.io/part-of: spring-petclinic name: config-server spec: ports: @@ -51,7 +52,6 @@ spec: io.kompose.service: config-server status: loadBalancer: {} - --- apiVersion: v1 kind: Service @@ -59,6 +59,7 @@ metadata: creationTimestamp: null labels: io.kompose.service: customers-service + app.kubernetes.io/part-of: spring-petclinic name: customers-service spec: ports: @@ -69,7 +70,6 @@ spec: io.kompose.service: customers-service status: loadBalancer: {} - --- apiVersion: v1 kind: Service @@ -77,6 +77,7 @@ metadata: creationTimestamp: null labels: io.kompose.service: discovery-server + app.kubernetes.io/part-of: spring-petclinic name: discovery-server spec: ports: @@ -87,7 +88,6 @@ spec: io.kompose.service: discovery-server status: loadBalancer: {} - --- apiVersion: v1 kind: Service @@ -95,6 +95,7 @@ metadata: creationTimestamp: null labels: io.kompose.service: vets-service + app.kubernetes.io/part-of: spring-petclinic name: vets-service spec: ports: @@ -105,7 +106,6 @@ spec: io.kompose.service: vets-service status: loadBalancer: {} - --- apiVersion: v1 kind: Service @@ -113,6 +113,7 @@ metadata: creationTimestamp: null labels: io.kompose.service: visits-service + app.kubernetes.io/part-of: spring-petclinic name: visits-service spec: ports: @@ -123,7 +124,6 @@ spec: io.kompose.service: visits-service status: loadBalancer: {} - --- apiVersion: apps/v1 kind: Deployment @@ -131,7 +131,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: admin-server - name: admin-server + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-admin-server spec: replicas: 1 selector: @@ -144,6 +145,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: admin-server + app.kubernetes.io/part-of: spring-petclinic spec: containers: - command: @@ -162,7 +164,6 @@ spec: memory: "536870912" restartPolicy: Always status: {} - --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -178,7 +179,6 @@ spec: podSelector: matchLabels: io.kompose.network/spring-petclinic-default: "true" - --- apiVersion: apps/v1 kind: Deployment @@ -186,7 +186,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: api-gateway - name: api-gateway + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-api-gateway spec: replicas: 1 selector: @@ -199,6 +200,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: api-gateway + app.kubernetes.io/part-of: spring-petclinic spec: containers: - command: @@ -217,7 +219,6 @@ spec: memory: "536870912" restartPolicy: Always status: {} - --- apiVersion: apps/v1 kind: Deployment @@ -225,7 +226,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: config-server - name: config-server + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-config-server spec: replicas: 1 selector: @@ -238,6 +240,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: config-server + app.kubernetes.io/part-of: spring-petclinic spec: containers: - image: springcommunity/spring-petclinic-config-server @@ -249,7 +252,6 @@ spec: memory: "536870912" restartPolicy: Always status: {} - --- 
apiVersion: apps/v1 kind: Deployment @@ -257,7 +259,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: customers-service - name: customers-service + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-customers-service spec: replicas: 1 selector: @@ -270,6 +273,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: customers-service + app.kubernetes.io/part-of: spring-petclinic spec: containers: - command: @@ -288,7 +292,6 @@ spec: memory: "536870912" restartPolicy: Always status: {} - --- apiVersion: apps/v1 kind: Deployment @@ -296,7 +299,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: discovery-server - name: discovery-server + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-discovery-server spec: replicas: 1 selector: @@ -309,6 +313,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: discovery-server + app.kubernetes.io/part-of: spring-petclinic spec: containers: - command: @@ -327,7 +332,6 @@ spec: memory: "536870912" restartPolicy: Always status: {} - --- apiVersion: apps/v1 kind: Deployment @@ -335,7 +339,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: vets-service - name: vets-service + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-vets-service spec: replicas: 1 selector: @@ -348,6 +353,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: vets-service + app.kubernetes.io/part-of: spring-petclinic spec: containers: - command: @@ -366,7 +372,6 @@ spec: memory: "536870912" restartPolicy: Always status: {} - --- apiVersion: apps/v1 kind: Deployment @@ -374,7 +379,8 @@ metadata: creationTimestamp: null labels: io.kompose.service: visits-service - name: visits-service + app.kubernetes.io/part-of: spring-petclinic + name: spring-petclinic-visits-service spec: replicas: 1 selector: @@ -387,6 +393,7 @@ spec: labels: io.kompose.network/spring-petclinic-default: "true" io.kompose.service: visits-service + app.kubernetes.io/part-of: spring-petclinic spec: containers: - command: @@ -405,4 +412,3 @@ spec: memory: "536870912" restartPolicy: Always status: {} - diff --git a/examples/enable-operator-and-auto-instrumentation/update-demo.sh b/examples/enable-operator-and-auto-instrumentation/update-demo.sh index faadbe43cc..4d3662daf1 100644 --- a/examples/enable-operator-and-auto-instrumentation/update-demo.sh +++ b/examples/enable-operator-and-auto-instrumentation/update-demo.sh @@ -1,13 +1,38 @@ #!/usr/bin/env bash # Updates the spring-petclinic demo application with the latest upstream changes -# yq and kompose are required to run this + +# Requirements: yq and kompose # brew install yq # brew install kompose -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -curl -L wget https://raw.githubusercontent.com/spring-petclinic/spring-petclinic-microservices/master/docker-compose.yml > $SCRIPT_DIR/spring-petclinic/docker-compose.yaml + +# Set default paths if environment variables are not set +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +DOCKER_COMPOSE_PATH=${DOCKER_COMPOSE_PATH:-"$SCRIPT_DIR/spring-petclinic/docker-compose.yaml"} +SPRING_PETCLINIC_PATH=${SPRING_PETCLINIC_PATH:-"$SCRIPT_DIR/spring-petclinic/spring-petclinic.yaml"} + +# Download the docker-compose file +curl -L https://raw.githubusercontent.com/spring-petclinic/spring-petclinic-microservices/master/docker-compose.yml \ + > "$DOCKER_COMPOSE_PATH" + # 
Delete extra servers to minimize resource usage -yq -i 'del(.services.grafana-server)' $SCRIPT_DIR/spring-petclinic/docker-compose.yaml -yq -i 'del(.services.prometheus-server)' $SCRIPT_DIR/spring-petclinic/docker-compose.yaml -yq -i 'del(.services.tracing-server)' $SCRIPT_DIR/spring-petclinic/docker-compose.yaml -kompose convert --file=$SCRIPT_DIR/spring-petclinic/docker-compose.yaml --out=$SCRIPT_DIR/spring-petclinic/spring-petclinic.yaml --with-kompose-annotation=false -rm -rf $SCRIPT_DIR/spring-petclinic/docker-compose.yaml +yq -i 'del(.services.grafana-server)' "$DOCKER_COMPOSE_PATH" +yq -i 'del(.services.prometheus-server)' "$DOCKER_COMPOSE_PATH" +yq -i 'del(.services.tracing-server)' "$DOCKER_COMPOSE_PATH" + +# Convert docker-compose to Kubernetes YAML and add label +kompose convert \ + --file="$DOCKER_COMPOSE_PATH" \ + --out="$SPRING_PETCLINIC_PATH" \ + --with-kompose-annotation=false + +# Add prefix 'spring-petclinic-' to Deployment names +yq eval -i 'select(.kind == "Deployment") .metadata.name |= "spring-petclinic-" + .' "$SPRING_PETCLINIC_PATH" +# Add 'app.kubernetes.io/part-of = spring-petclinic' label to Services, Deployments, and Pods +yq eval -i 'select(.kind == "Service") .metadata.labels += {"app.kubernetes.io/part-of": "spring-petclinic"}' "$SPRING_PETCLINIC_PATH" +yq eval -i 'select(.kind == "Deployment") .metadata.labels += {"app.kubernetes.io/part-of": "spring-petclinic"}' "$SPRING_PETCLINIC_PATH" +yq eval -i 'select(.kind == "Deployment") .spec.template.metadata.labels += {"app.kubernetes.io/part-of": "spring-petclinic"}' "$SPRING_PETCLINIC_PATH" + +# Remove the downloaded docker-compose file +rm -rf "$DOCKER_COMPOSE_PATH" + +echo "Conversion and label addition completed!" diff --git a/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl b/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl index 758e72b591..4aa30cc940 100644 --- a/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl +++ b/helm-charts/splunk-otel-collector/templates/operator/_helpers.tpl @@ -90,7 +90,7 @@ Helper to define entries for instrumentation libraries. {{- printf "\n" -}} {{- /* Generate YAML for the image field */ -}} - {{- printf "image: %s/%s" $value.repository $value.tag | indent 4 -}} + {{- printf "image: %s:%s" $value.repository $value.tag | indent 4 -}} {{- printf "\n" -}} {{- /* Output environment variables for the instrumentation library */ -}}