From fef0dfdf4170706e8666e9214790d5a05891fbe4 Mon Sep 17 00:00:00 2001
From: Florian Bacher
Date: Thu, 28 Sep 2023 08:13:22 +0200
Subject: [PATCH] docs: example for the usage of Analyses (#2168)

Signed-off-by: Florian Bacher
Co-authored-by: RealAnna <89971034+RealAnna@users.noreply.github.com>
---
 examples/support/analysis/README.md           | 381 ++++++++++++++++++
 .../support/analysis/config/mock-server.yaml  | 211 ++++++++++
 .../analysis/config/service-monitor.yaml      |  74 ++++
 3 files changed, 666 insertions(+)
 create mode 100644 examples/support/analysis/README.md
 create mode 100644 examples/support/analysis/config/mock-server.yaml
 create mode 100644 examples/support/analysis/config/service-monitor.yaml

diff --git a/examples/support/analysis/README.md b/examples/support/analysis/README.md
new file mode 100644
index 0000000000..208c8a79d9
--- /dev/null
+++ b/examples/support/analysis/README.md
@@ -0,0 +1,381 @@

# Metric Analysis

This example shows how the `Analysis` feature of the `metrics-operator` can be used to
define goals for metric values and evaluate them.

## Difference between `Analysis` and `Evaluations`

`Evaluations` provide an easy way of checking whether the current value of a `KeptnMetric` fulfills
a requirement, i.e. is below or above a certain threshold.
This is ideal for simple checks, such as validating that a cluster currently has
enough resources for the pod of a deployment to be scaled up.
In other cases, however, checking only the current value of a metric (e.g. the number of
available resources) is not enough, and you may want to evaluate the value of a metric over a specific timeframe.
For example, you may execute load tests for a service after it has been deployed, and then verify
that performance-related metrics, such as the response time or the error rate measured while
the tests were running, meet certain criteria.
That is where the `Analysis` capabilities of the `metrics-operator` come in.

## Defining Metrics

The first step in using this feature is to figure out what we want to analyze.
In our example, we would like to analyze the response time and the error rate of a service.
Let's assume those metrics are retrieved from Prometheus, so first we are going to create a `KeptnMetricsProvider`.
Just like when using `KeptnMetrics`, the provider tells Keptn where to retrieve the values we are interested in.

To create the provider, first create a namespace for the example from this directory:

```shell
kubectl create namespace analysis-demo
```

Then create the `KeptnMetricsProvider` in that namespace, as sketched below.
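A minimal provider resource for Prometheus might look like the following.
This is a sketch rather than the exact manifest of the example: the API version
(`metrics.keptn.sh/v1alpha3`), the provider name, and the `targetServer` URL, which
here points at the mock server from `config/mock-server.yaml`, are assumptions to
adjust for your setup.

```shell
cat <<EOF | kubectl apply -f -
apiVersion: metrics.keptn.sh/v1alpha3
kind: KeptnMetricsProvider
metadata:
  name: my-provider            # hypothetical name
  namespace: analysis-demo
spec:
  type: prometheus
  # Assumption: the mock server defined in config/mock-server.yaml
  # stands in for a real Prometheus API in this demo
  targetServer: "http://mockserver.analysis-demo.svc.cluster.local:1080"
EOF
```

On top of the provider, the example defines one `AnalysisValueTemplate` per query
(`response-time-p95` and `error-rate`), an `AnalysisDefinition` with the objectives
described below, and an `Analysis` that triggers the evaluation for a given timeframe.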
## Interpreting the Analysis Result

Once the `Analysis` has completed, its result contains an `objectiveResults` array
with one entry per objective:

- The first item in the array:
  - **`result`**: Shows the evaluation of the failure and warning criteria for this objective.
  - **`warning`**: Indicates whether a warning has been issued (false in this case).
  - **`pass`**: Indicates whether the objective has passed (true in this case).
  - **`objective`**: Describes the objective being evaluated.
    It includes:
    - **`analysisValueTemplateRef`**: Refers to the template used for the analysis (`response-time-p95`).
    - **`target`**: Sets the target values for the failure and warning conditions.
      In this case, a failure occurs if the value is greater than 500 milliseconds,
      and a warning occurs if it is greater than 300 milliseconds.
    - **`weight`**: Specifies the weight assigned to this objective (weight: 1).
  - **`value`**: Indicates the actual value measured for this objective (value: 0.00475).
  - **`score`**: Indicates the score assigned to this objective (score: 1).

- The second item in the array:
  - **`result`**: Similar to the first objective, it checks whether the value is
    greater than 0, which has not been fulfilled (`fulfilled: false`).
    There are no warning conditions in this case.
  - **`objective`**: Describes the objective related to the error rate analysis.
    - **`analysisValueTemplateRef`**: Refers to the template used for the analysis (`error-rate`).
    - **`target`**: Sets the target value for the failure condition (a failure occurs if the value is greater than 0).
    - **`weight`**: Specifies the weight assigned to this objective (weight: 1).
    - **`keyObjective`**: Indicates that this is a key objective (true).
  - **`value`**: Indicates the actual value measured for this objective (value: 0).
  - **`score`**: Indicates the score assigned to this objective (score: 1).

**`totalScore`**: Represents the total score achieved based on the objectives evaluated (totalScore: 2).

**`maximumScore`**: Indicates the maximum possible score (maximumScore: 2).

**`pass`**: Indicates whether the overall evaluation has passed (true in this case).

**`warning`**: Indicates whether any warnings have been issued during the evaluation (false in this case).
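Assembled from the fields above, the raw result of this run has roughly the following
shape. This is a sketch for orientation, not verbatim operator output; the exact
schema of the result may differ in detail, and the quantity notation for the
thresholds is an assumption.

```yaml
# Sketch only; values taken from the description above
objectiveResults:
  - warning: false
    pass: true
    objective:
      analysisValueTemplateRef:
        name: response-time-p95
      target:
        failure:
          greaterThan:
            fixedValue: 500m   # fail above 500 milliseconds (assumed notation)
        warning:
          greaterThan:
            fixedValue: 300m   # warn above 300 milliseconds (assumed notation)
      weight: 1
    value: 0.00475
    score: 1
  - result:
      failResult:
        fulfilled: false       # the error rate is not greater than 0
    objective:
      analysisValueTemplateRef:
        name: error-rate
      target:
        failure:
          greaterThan:
            fixedValue: 0
      weight: 1
      keyObjective: true
    value: 0
    score: 1
totalScore: 2
maximumScore: 2
pass: true
warning: false
```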
"parameters" : { + "query" : [ "rate(http_requests_total{status_code=\"500\", job=\"podtato-head-frontend\"}[1m]) or on() vector(0)" ] + } + } + }, + "httpResponse": { + "body": { + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "metric-name", + "job": "", + "instance": "" + }, + "values": [[1669714193.275, "0"]] + } + ] + } + }, + "statusCode": 200 + } + }, + { + "httpRequest": { + "path": "/api/v1/query_range", + "method": "POST", + "body" : { + "type" : "PARAMETERS", + "parameters" : { + "query" : [ "query-3" ] + } + } + }, + "httpResponse": { + "body": { + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "metric-name", + "job": "", + "instance": "" + }, + "values": [[1669714193.275, "30"]] + } + ] + } + }, + "statusCode": 200 + } + } + ] + mockserver.properties: |- + ############################### + # MockServer & Proxy Settings # + ############################### + # Socket & Port Settings + # socket timeout in milliseconds (default 120000) + mockserver.maxSocketTimeout=120000 + # Certificate Generation + # dynamically generated CA key pair (if they don't already exist in + specified directory) + mockserver.dynamicallyCreateCertificateAuthorityCertificate=true + # save dynamically generated CA key pair in working directory + mockserver.directoryToSaveDynamicSSLCertificate=. + # certificate domain name (default "localhost") + mockserver.sslCertificateDomainName=localhost + # comma separated list of ip addresses for Subject Alternative Name domain + names (default empty list) + mockserver.sslSubjectAlternativeNameDomains=www.example.com,www.another.com + # comma separated list of ip addresses for Subject Alternative Name ips + (default empty list) + mockserver.sslSubjectAlternativeNameIps=127.0.0.1 + # CORS + # enable CORS for MockServer REST API + mockserver.enableCORSForAPI=true + # enable CORS for all responses + mockserver.enableCORSForAllResponses=true + # Json Initialization + mockserver.initializationJsonPath=/config/initializerJson.json diff --git a/examples/support/analysis/config/service-monitor.yaml b/examples/support/analysis/config/service-monitor.yaml new file mode 100644 index 0000000000..c3274b8986 --- /dev/null +++ b/examples/support/analysis/config/service-monitor.yaml @@ -0,0 +1,74 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/instance: k8s + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: kube-prometheus + app.kubernetes.io/version: 2.39.1 + name: prometheus-k8s + namespace: podtato-kubectl +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/instance: k8s + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: kube-prometheus + app.kubernetes.io/version: 2.39.1 + name: prometheus-k8s + namespace: podtato-kubectl +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: monitoring +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor 
diff --git a/examples/support/analysis/config/service-monitor.yaml b/examples/support/analysis/config/service-monitor.yaml
new file mode 100644
index 0000000000..c3274b8986
--- /dev/null
+++ b/examples/support/analysis/config/service-monitor.yaml
@@ -0,0 +1,74 @@

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 2.39.1
  name: prometheus-k8s
  namespace: podtato-kubectl
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 2.39.1
  name: prometheus-k8s
  namespace: podtato-kubectl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus-k8s
subjects:
  - kind: ServiceAccount
    name: prometheus-k8s
    namespace: monitoring
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: podtato-head
  namespace: monitoring
spec:
  endpoints:
    - interval: 30s
      port: http
  namespaceSelector:
    matchNames:
      - podtato-kubectl
  selector:
    matchLabels:
      app: podtato-head
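These resources let a kube-prometheus stack scrape the podtato-head service directly,
instead of using the mock server. Assuming Prometheus runs in the `monitoring`
namespace and podtato-head in `podtato-kubectl`, they can be applied and checked
from the example directory like this:

```shell
kubectl apply -f config/service-monitor.yaml

# The ServiceMonitor should now exist and be picked up by Prometheus
kubectl -n monitoring get servicemonitor podtato-head
```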