From 3c2a0a0d939af42687066f66fc268daef84f6327 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 7 Sep 2023 10:13:26 -0400 Subject: [PATCH] Various changes (#1622) * Various changes Signed-off-by: Alan Cha * Remove extraneous function Signed-off-by: Alan Cha * Bump chart version Signed-off-by: Alan Cha * Add test Signed-off-by: Alan Cha * Bump golang version Signed-off-by: Alan Cha * Update CONTRIBUTING and README Signed-off-by: Alan Cha * Update CONTRIBUTING Signed-off-by: Alan Cha * Fix typo Signed-off-by: Alan Cha * Respond to Michael's comments Signed-off-by: Alan Cha * Fix test Signed-off-by: Alan Cha * Add tests Signed-off-by: Alan Cha * Fix workflow tests Signed-off-by: Alan Cha * Fix workflows Signed-off-by: Alan Cha * Fix workflows Signed-off-by: Alan Cha * Update kubectl log Signed-off-by: Alan Cha --------- Signed-off-by: Alan Cha --- .github/workflows/testcharts.yaml | 78 ++-- .github/workflows/verifyuserexperience.yaml | 28 +- CONTRIBUTING.md | 4 +- README.md | 12 +- action/delete.go | 28 -- action/delete_test.go | 33 -- action/launch.go | 65 --- action/launch_test.go | 51 --- action/log.go | 26 -- action/log_test.go | 49 --- action/run.go | 4 - action/run_test.go | 6 +- base/collect_grpc.go | 2 +- base/collect_grpc_test.go | 118 ++++++ base/collect_http.go | 33 +- base/collect_http_test.go | 144 +++++-- base/experiment_test.go | 41 ++ base/kubedriver_test.go | 15 + base/notify.go | 19 +- base/notify_test.go | 20 +- base/summarymetrics/doc.go | 2 - base/summarymetrics/summary_metric.go | 106 ----- base/summarymetrics/summary_metric_test.go | 26 -- base/test_helpers.go | 8 + charts/iter8/Chart.yaml | 2 +- charts/iter8/README.md | 4 +- charts/iter8/templates/_k-job.tpl | 4 +- charts/iter8/templates/_k-role.tpl | 4 +- charts/iter8/templates/_k-rolebinding.tpl | 4 +- charts/iter8/templates/_k-secret.tpl | 2 +- charts/iter8/templates/_payload-github.tpl | 4 - charts/iter8/templates/_payload-slack.tpl | 3 - charts/litmuschaos/Chart.yaml | 19 - 
charts/litmuschaos/templates/_chaosengine.tpl | 23 -- .../templates/_chaosexperiment.tpl | 90 ----- charts/litmuschaos/templates/_rbac.tpl | 45 --- charts/litmuschaos/templates/resources.yaml | 5 - charts/litmuschaos/values.yaml | 8 - cmd/controllers.go | 2 +- cmd/k.go | 15 +- cmd/kdelete.go | 34 -- cmd/kdelete_test.go | 37 -- cmd/klaunch.go | 110 ----- cmd/klaunch_test.go | 27 -- cmd/klog.go | 39 -- cmd/klog_test.go | 54 --- cmd/krun.go | 10 +- cmd/krun_test.go | 8 +- cmd/root.go | 12 - cmd/version.go | 2 +- config.yaml | 1 + driver/common.go | 6 +- driver/filedriver.go | 53 --- driver/filedriver_test.go | 84 ---- driver/kubedriver.go | 283 +------------ driver/kubedriver_test.go | 103 +---- driver/test_helpers.go | 2 +- go.mod | 2 +- templates/notify/_payload-github.tpl | 2 +- templates/notify/_payload-slack.tpl | 2 +- .../blue-green-http-kserve/bumpweights.sh | 4 - .../blue-green-http-kserve/cleanup.sh | 6 - .../blue-green-http-kserve/execintosleep.sh | 6 - .../blue-green-http-kserve/initialize.sh | 130 ------ .../blue-green-http-kserve/promote-v2.sh | 22 - .../blue-green-http-kserve/sleep.sh | 52 --- .../blue-green-http-kserve/steps.sh | 41 -- .../blue-green-http-kserve/v2-candidate.sh | 18 - .../controllers/canary-http-kserve/cleanup.sh | 6 - .../canary-http-kserve/execintosleep.sh | 6 - .../canary-http-kserve/initialize.sh | 88 ---- .../canary-http-kserve/promote-v2.sh | 19 - .../controllers/canary-http-kserve/sleep.sh | 52 --- .../controllers/canary-http-kserve/steps.sh | 16 - .../canary-http-kserve/v2-candidate.sh | 18 - testdata/controllers/installkserve.sh | 2 - .../controllers/mirror-grpc-kserve/cleanup.sh | 6 - .../mirror-grpc-kserve/execintosleep.sh | 6 - .../mirror-grpc-kserve/initialize.sh | 107 ----- .../mirror-grpc-kserve/promote-v2.sh | 24 -- .../controllers/mirror-grpc-kserve/sleep.sh | 381 ------------------ .../controllers/mirror-grpc-kserve/steps.sh | 16 - .../mirror-grpc-kserve/v2-candidate.sh | 29 -- testdata/controllers/mirror/cleanup.sh | 
6 - .../controllers/mirror/default-routing.sh | 85 ---- testdata/controllers/mirror/httpbin-v1.sh | 28 -- testdata/controllers/mirror/httpbin-v2.sh | 28 -- testdata/controllers/mirror/sleep.sh | 22 - testdata/controllers/upgradeistio.sh | 1 - testdata/metrics/test-ce.metrics.yaml | 77 ---- .../metrics/test-request-body.metrics.yaml | 16 - 91 files changed, 429 insertions(+), 2912 deletions(-) delete mode 100644 action/delete.go delete mode 100644 action/delete_test.go delete mode 100644 action/launch.go delete mode 100644 action/launch_test.go delete mode 100644 action/log.go delete mode 100644 action/log_test.go create mode 100644 base/kubedriver_test.go delete mode 100644 base/summarymetrics/doc.go delete mode 100644 base/summarymetrics/summary_metric.go delete mode 100644 base/summarymetrics/summary_metric_test.go delete mode 100644 charts/iter8/templates/_payload-github.tpl delete mode 100644 charts/iter8/templates/_payload-slack.tpl delete mode 100644 charts/litmuschaos/Chart.yaml delete mode 100644 charts/litmuschaos/templates/_chaosengine.tpl delete mode 100644 charts/litmuschaos/templates/_chaosexperiment.tpl delete mode 100644 charts/litmuschaos/templates/_rbac.tpl delete mode 100644 charts/litmuschaos/templates/resources.yaml delete mode 100644 charts/litmuschaos/values.yaml delete mode 100644 cmd/kdelete.go delete mode 100644 cmd/kdelete_test.go delete mode 100644 cmd/klaunch.go delete mode 100644 cmd/klaunch_test.go delete mode 100644 cmd/klog.go delete mode 100644 cmd/klog_test.go delete mode 100644 driver/filedriver.go delete mode 100644 driver/filedriver_test.go delete mode 100755 testdata/controllers/blue-green-http-kserve/bumpweights.sh delete mode 100755 testdata/controllers/blue-green-http-kserve/cleanup.sh delete mode 100755 testdata/controllers/blue-green-http-kserve/execintosleep.sh delete mode 100755 testdata/controllers/blue-green-http-kserve/initialize.sh delete mode 100755 testdata/controllers/blue-green-http-kserve/promote-v2.sh delete 
mode 100755 testdata/controllers/blue-green-http-kserve/sleep.sh delete mode 100644 testdata/controllers/blue-green-http-kserve/steps.sh delete mode 100755 testdata/controllers/blue-green-http-kserve/v2-candidate.sh delete mode 100755 testdata/controllers/canary-http-kserve/cleanup.sh delete mode 100755 testdata/controllers/canary-http-kserve/execintosleep.sh delete mode 100755 testdata/controllers/canary-http-kserve/initialize.sh delete mode 100755 testdata/controllers/canary-http-kserve/promote-v2.sh delete mode 100755 testdata/controllers/canary-http-kserve/sleep.sh delete mode 100644 testdata/controllers/canary-http-kserve/steps.sh delete mode 100755 testdata/controllers/canary-http-kserve/v2-candidate.sh delete mode 100755 testdata/controllers/installkserve.sh delete mode 100755 testdata/controllers/mirror-grpc-kserve/cleanup.sh delete mode 100755 testdata/controllers/mirror-grpc-kserve/execintosleep.sh delete mode 100755 testdata/controllers/mirror-grpc-kserve/initialize.sh delete mode 100755 testdata/controllers/mirror-grpc-kserve/promote-v2.sh delete mode 100755 testdata/controllers/mirror-grpc-kserve/sleep.sh delete mode 100644 testdata/controllers/mirror-grpc-kserve/steps.sh delete mode 100755 testdata/controllers/mirror-grpc-kserve/v2-candidate.sh delete mode 100755 testdata/controllers/mirror/cleanup.sh delete mode 100755 testdata/controllers/mirror/default-routing.sh delete mode 100755 testdata/controllers/mirror/httpbin-v1.sh delete mode 100755 testdata/controllers/mirror/httpbin-v2.sh delete mode 100755 testdata/controllers/mirror/sleep.sh delete mode 100755 testdata/controllers/upgradeistio.sh delete mode 100644 testdata/metrics/test-ce.metrics.yaml delete mode 100644 testdata/metrics/test-request-body.metrics.yaml diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index ff224b5ff..c8f794bcb 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -45,7 +45,7 @@ jobs: # TODO: add check 
to verify when a change is made to kustomize, a similar change is made to charts http-experiment: - name: HTTP load test + name: HTTP performance test needs: get_versions runs-on: ubuntu-latest strategy: @@ -85,14 +85,11 @@ jobs: - name: Start performance test if: steps.modified-files.outputs.any_modified == 'true' run: | - helm upgrade --install \ - --repo https://iter8-tools.github.io/iter8 --version 0.16 httpbin-test iter8 \ - --localChart \ - --chartName charts/iter8 \ + helm upgrade --install httpbin-test charts/iter8 \ --set "tasks={http}" \ --set http.url="http://httpbin.default/get" \ --set logLevel=trace - kubectl wait --for=condition=complete --timeout=180s job/default-1-job + kubectl wait --for=condition=complete --timeout=180s job/httpbin-test-1-job - name: Get Kubernetes status if: steps.modified-files.outputs.any_modified == 'true' @@ -102,7 +99,7 @@ jobs: - name: View test logs and delete test if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service @@ -113,10 +110,10 @@ jobs: - name: Check GET /httpDashboard if: steps.modified-files.outputs.any_modified == 'true' run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=httpbin-test" -f http-payload-experiment: - name: HTTP load test with payload + name: HTTP performance test with payload needs: get_versions runs-on: ubuntu-latest strategy: @@ -156,15 +153,12 @@ jobs: - name: Start performance test if: steps.modified-files.outputs.any_modified == 'true' run: | - helm upgrade --install \ - --repo https://iter8-tools.github.io/iter8 --version 0.16 httpbin-test iter8 \ - --localChart \ - --chartName charts/iter8 \ + helm upgrade --install httpbin-test charts/iter8 \ --set "tasks={http}" \ --set http.url="http://httpbin.default/post" \ --set 
http.payloadStr=hello \ --set logLevel=trace - kubectl wait --for=condition=complete --timeout=180s job/default-1-job + kubectl wait --for=condition=complete --timeout=180s job/httpbin-test-1-job - name: Get Kubernetes status if: steps.modified-files.outputs.any_modified == 'true' @@ -174,7 +168,7 @@ jobs: - name: View test logs and delete test if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service @@ -185,10 +179,10 @@ jobs: - name: Check GET /httpDashboard if: steps.modified-files.outputs.any_modified == 'true' run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=httpbin-test" -f http-multiple-experiment: - name: HTTP load test with multiple endpoints + name: HTTP performance test with multiple endpoints needs: get_versions runs-on: ubuntu-latest strategy: @@ -228,17 +222,14 @@ jobs: - name: Start performance test if: steps.modified-files.outputs.any_modified == 'true' run: | - helm upgrade --install \ - --repo https://iter8-tools.github.io/iter8 --version 0.16 httpbin-test iter8 \ - --localChart \ - --chartName charts/iter8 \ + helm upgrade --install httpbin-test charts/iter8 \ --set "tasks={http}" \ --set http.endpoints.get.url=http://httpbin.default/get \ --set http.endpoints.getAnything.url=http://httpbin.default/anything \ --set http.endpoints.post.url=http://httpbin.default/post \ --set http.endpoints.post.payloadStr=hello \ --set logLevel=trace - kubectl wait --for=condition=complete --timeout=180s job/default-1-job + kubectl wait --for=condition=complete --timeout=180s job/httpbin-test-1-job - name: Get Kubernetes status if: steps.modified-files.outputs.any_modified == 'true' @@ -248,7 +239,7 @@ jobs: - name: View test logs and delete test if: steps.modified-files.outputs.any_modified == 
'true' run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service @@ -259,10 +250,10 @@ jobs: - name: Check GET /httpDashboard if: steps.modified-files.outputs.any_modified == 'true' run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=httpbin-test" -f grpc-experiment: - name: gRPC load test + name: gRPC performance test needs: get_versions runs-on: ubuntu-latest strategy: @@ -311,10 +302,7 @@ jobs: - name: Start performance test if: steps.modified-files.outputs.any_modified == 'true' run: | - helm upgrade --install \ - --repo https://iter8-tools.github.io/iter8 --version 0.16 routeguide-test iter8 \ - --localChart \ - --chartName charts/iter8 \ + helm upgrade --install routeguide-test charts/iter8 \ --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ @@ -324,7 +312,7 @@ jobs: --set grpc.call=routeguide.RouteGuide.GetFeature \ --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ --set logLevel=trace - kubectl wait --for=condition=complete --timeout=180s job/default-1-job + kubectl wait --for=condition=complete --timeout=180s job/routeguide-test-1-job - name: Get Kubernetes status if: steps.modified-files.outputs.any_modified == 'true' @@ -334,7 +322,7 @@ jobs: - name: View test logs and delete test if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl logs -l iter8.tools/group=routeguide-test + kubectl logs -l iter8.tools/test=routeguide-test helm delete routeguide-test - name: Expose metrics service @@ -345,10 +333,10 @@ jobs: - name: Check GET /grpcDashboard if: steps.modified-files.outputs.any_modified == 'true' run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + curl 
"http://localhost:8080/grpcDashboard?namespace=default&experiment=routeguide-test" -f grpc-multiple-experiment: - name: gRPC load test with multiple endpoints + name: gRPC performance test with multiple endpoints needs: get_versions runs-on: ubuntu-latest strategy: @@ -397,10 +385,7 @@ jobs: - name: Start performance test if: steps.modified-files.outputs.any_modified == 'true' run: | - helm upgrade --install \ - --repo https://iter8-tools.github.io/iter8 --version 0.16 routeguide-test iter8 \ - --localChart \ - --chartName charts/iter8 \ + helm upgrade --install routeguide-test charts/iter8 \ --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ @@ -412,7 +397,7 @@ jobs: --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ --set logLevel=trace - kubectl wait --for=condition=complete --timeout=180s job/default-1-job + kubectl wait --for=condition=complete --timeout=180s job/routeguide-test-1-job - name: Get Kubernetes status if: steps.modified-files.outputs.any_modified == 'true' @@ -422,7 +407,7 @@ jobs: - name: View test logs and delete test if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl logs -l iter8.tools/group=routeguide-test + kubectl logs -l iter8.tools/test=routeguide-test helm delete routeguide-test - name: Expose metrics service @@ -433,10 +418,10 @@ jobs: - name: Check GET /grpcDashboard if: steps.modified-files.outputs.any_modified == 'true' run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=routeguide-test" -f grpc-experiment2: - name: gRPC load test 2 + name: gRPC performance test 2 needs: get_versions runs-on: ubuntu-latest strategy: @@ -476,16 +461,13 @@ jobs: - name: Start performance test if: 
steps.modified-files.outputs.any_modified == 'true' run: | - helm upgrade --install \ - --repo https://iter8-tools.github.io/iter8 --version 0.16 hello-test iter8 \ - --localChart \ - --chartName charts/iter8 \ + helm upgrade --install hello-test charts/iter8 \ --set "tasks={grpc}" \ --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ --set logLevel=trace - kubectl wait --for=condition=complete --timeout=180s job/default-1-job + kubectl wait --for=condition=complete --timeout=180s job/hello-test-1-job - name: Get Kubernetes status if: steps.modified-files.outputs.any_modified == 'true' @@ -495,7 +477,7 @@ jobs: - name: View test logs and delete test if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl logs -l iter8.tools/group=hello-test + kubectl logs -l iter8.tools/test=hello-test helm delete hello-test - name: Expose metrics service @@ -506,4 +488,4 @@ jobs: - name: Check GET /grpcDashboard if: steps.modified-files.outputs.any_modified == 'true' run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f \ No newline at end of file + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=hello-test" -f \ No newline at end of file diff --git a/.github/workflows/verifyuserexperience.yaml b/.github/workflows/verifyuserexperience.yaml index ec0aa252b..ca8a1041b 100644 --- a/.github/workflows/verifyuserexperience.yaml +++ b/.github/workflows/verifyuserexperience.yaml @@ -13,7 +13,7 @@ on: jobs: http-experiment: - name: HTTP load test + name: HTTP performance test runs-on: ubuntu-latest steps: @@ -48,7 +48,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service @@ -60,7 +60,7 @@ jobs: curl 
"http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f http-payload-experiment: - name: HTTP load test with payload + name: HTTP performance test with payload runs-on: ubuntu-latest steps: @@ -96,7 +96,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test @@ -109,7 +109,7 @@ jobs: curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f http-multiple-experiment: - name: HTTP load test with multiple endpoints + name: HTTP performance test with multiple endpoints runs-on: ubuntu-latest steps: @@ -146,7 +146,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service @@ -158,7 +158,7 @@ jobs: curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f grpc-experiment: - name: gRPC load test + name: gRPC performance test runs-on: ubuntu-latest steps: @@ -207,7 +207,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=routeguide-test + kubectl logs -l iter8.tools/test=routeguide-test helm delete routeguide-test - name: Expose metrics service @@ -219,7 +219,7 @@ jobs: curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f grpc-multiple-experiment: - name: gRPC load test with multiple endpoints + name: gRPC performance test with multiple endpoints runs-on: ubuntu-latest steps: @@ -270,7 +270,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=routeguide-test + kubectl logs -l iter8.tools/test=routeguide-test helm delete routeguide-test - name: Expose metrics service @@ -282,7 +282,7 @@ jobs: curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f grpc-experiment2: - name: gRPC load test 2 + name: 
gRPC performance test 2 runs-on: ubuntu-latest steps: @@ -319,7 +319,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=hello-test + kubectl logs -l iter8.tools/test=hello-test helm delete hello-test - name: Expose metrics service @@ -370,7 +370,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service @@ -427,7 +427,7 @@ jobs: - name: View test logs and delete test run: | - kubectl logs -l iter8.tools/group=httpbin-test + kubectl logs -l iter8.tools/test=httpbin-test helm delete httpbin-test - name: Expose metrics service diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 067f0e1a9..6a21d5856 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,7 +14,7 @@ If anything doesn't make sense, or doesn't work when you run it, please open a b We welcome many types of contributions including: -* [CLI and Iter8 experiment charts](#iter8-toolsiter8) +* [CLI and Iter8 performance test charts](#iter8-toolsiter8) * [Docs](#iter8-toolsdocs) * CI, builds, and tests * Reviewing pull requests @@ -71,7 +71,7 @@ repository, you can amend your commit with the sign-off by running: The Iter8 project consists of the following repos. -1. [iter8-tools/iter8](https://github.com/iter8-tools/iter8): source for the Iter8 CLI, experiment, and controller charts +1. [iter8-tools/iter8](https://github.com/iter8-tools/iter8): source for the Iter8 CLI, performance test, and controller charts 2. [iter8-tools/docs](https://github.com/iter8-tools/docs): source for Iter8 docs ### iter8-tools/iter8 diff --git a/README.md b/README.md index 3840ac415..d4c985b7a 100644 --- a/README.md +++ b/README.md @@ -12,19 +12,19 @@ Iter8 supports the following use-cases: 3. A/B/n testing of applications and ML models 4. 
Reliable and automated routing: blue-green and canary -## :rocket: Iter8 experiment +## :rocket: Iter8 performance test -Iter8 introduces the notion of an experiment, which is a list of configurable tasks that are executed in a specific sequence. +Iter8 introduces a set of tasks which can be composed in order to conduct a variety of performance tests.

- +

-Iter8 packs a number of powerful features that facilitate Kubernetes app testing and experimentation. They include the following. +Iter8 packs a number of powerful features that facilitate Kubernetes app testing. They include the following. 1. **Generating load and collecting built-in metrics for HTTP and gRPC services.** Simplifies performance testing by eliminating the need to setup and use metrics databases. -2. **Readiness check.** The performance testing portion of the experiment begins only after the service is ready. -3. **Experiment anywhere.** Iter8 experiments can be launched inside a Kubernetes cluster, in local environments, or inside a GitHub Actions pipeline. +2. **Readiness check.** The performance testing portion can be configured to start only after the service is ready. +3. **Test anywhere.** Iter8 performance tests can be launched inside a Kubernetes cluster, in local environments, or inside a GitHub Actions pipeline. 4. **Traffic controller.** Automatically and dynamically reconfigures routing resources based on the state of Kubernetes apps/ML models. 5. **Client-side SDK.** Facilitates routing and metrics collection task associated with distributed (i.e., client-server architecture-based) A/B/n testing in Kubernetes. 
diff --git a/action/delete.go b/action/delete.go deleted file mode 100644 index c7ef13bdd..000000000 --- a/action/delete.go +++ /dev/null @@ -1,28 +0,0 @@ -package action - -import ( - "github.com/iter8-tools/iter8/driver" -) - -// DeleteOpts are the options used for deleting experiment groups -type DeleteOpts struct { - // KubeDriver enables access to Kubernetes cluster - *driver.KubeDriver -} - -// NewDeleteOpts initializes and returns launch opts -func NewDeleteOpts(kd *driver.KubeDriver) *DeleteOpts { - return &DeleteOpts{ - KubeDriver: kd, - } -} - -// KubeRun deletes a Kubernetes experiment -func (dOpts *DeleteOpts) KubeRun() error { - // initialize kube driver - if err := dOpts.KubeDriver.Init(); err != nil { - return err - } - - return dOpts.KubeDriver.Delete() -} diff --git a/action/delete_test.go b/action/delete_test.go deleted file mode 100644 index b60681ca0..000000000 --- a/action/delete_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package action - -import ( - "testing" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" -) - -func TestKubeDelete(t *testing.T) { - var err error - - // fix lOpts - lOpts := NewLaunchOpts(driver.NewFakeKubeDriver(cli.New())) - lOpts.ChartName = base.CompletePath("../charts", "iter8") - lOpts.LocalChart = true - lOpts.Values = []string{"tasks={http}", "http.url=https://iter8.tools", "http.duration=2s"} - - err = lOpts.KubeRun() - assert.NoError(t, err) - - rel, err := lOpts.Releases.Last(lOpts.Group) - assert.NotNil(t, rel) - assert.Equal(t, 1, rel.Version) - assert.NoError(t, err) - - // fix dOpts - dOpts := NewDeleteOpts(lOpts.KubeDriver) - err = dOpts.KubeRun() - assert.NoError(t, err) -} diff --git a/action/launch.go b/action/launch.go deleted file mode 100644 index b2c210396..000000000 --- a/action/launch.go +++ /dev/null @@ -1,65 +0,0 @@ -package action - -import ( - "strings" - - "github.com/iter8-tools/iter8/base" - 
"github.com/iter8-tools/iter8/driver" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/cli/values" -) - -const ( - // DefaultHelmRepository is the URL of the default Helm repository - DefaultHelmRepository = "https://iter8-tools.github.io/iter8" - // DefaultChartName is the default name of the Iter8 chart - DefaultChartName = "iter8" -) - -// LaunchOpts are the options used for launching experiments -type LaunchOpts struct { - // DryRun enables simulating a launch - DryRun bool - // ChartPathOptions - action.ChartPathOptions - // ChartName is the name of the chart - ChartName string - // Options provides the values to be combined with the experiment chart - values.Options - // Rundir is the directory where experiment.yaml file is located - RunDir string - // KubeDriver enables Kubernetes experiment run - *driver.KubeDriver - // LocalChart indicates the chart is on the local filesystem - LocalChart bool -} - -// NewLaunchOpts initializes and returns launch opts -func NewLaunchOpts(kd *driver.KubeDriver) *LaunchOpts { - return &LaunchOpts{ - DryRun: false, - ChartPathOptions: action.ChartPathOptions{ - RepoURL: DefaultHelmRepository, - Version: defaultChartVersion(), - }, - ChartName: DefaultChartName, - Options: values.Options{}, - RunDir: ".", - KubeDriver: kd, - LocalChart: false, - } -} - -func defaultChartVersion() string { - return strings.Replace(base.MajorMinor, "v", "", 1) + ".x" -} - -// KubeRun launches a Kubernetes experiment -func (lOpts *LaunchOpts) KubeRun() error { - // initialize kube driver - if err := lOpts.KubeDriver.Init(); err != nil { - return err - } - - return lOpts.KubeDriver.Launch(lOpts.ChartPathOptions, lOpts.ChartName, lOpts.Options, lOpts.Group, lOpts.DryRun) -} diff --git a/action/launch_test.go b/action/launch_test.go deleted file mode 100644 index c5705a3ea..000000000 --- a/action/launch_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package action - -import ( - "fmt" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - 
"github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" -) - -func TestKubeLaunch(t *testing.T) { - var err error - _ = os.Chdir(t.TempDir()) - - // fix lOpts - lOpts := NewLaunchOpts(driver.NewFakeKubeDriver(cli.New())) - lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", "http.duration=2s"} - - err = lOpts.KubeRun() - assert.NoError(t, err) - - fmt.Println(lOpts.Group) - fmt.Println(lOpts.Releases) - - rel, err := lOpts.Releases.Last(lOpts.Group) - assert.NotNil(t, rel) - assert.Equal(t, 1, rel.Version) - assert.NoError(t, err) -} - -func TestKubeLaunchLocalChart(t *testing.T) { - var err error - _ = os.Chdir(t.TempDir()) - - // fix lOpts - lOpts := NewLaunchOpts(driver.NewFakeKubeDriver(cli.New())) - lOpts.ChartName = base.CompletePath("../charts", "iter8") - lOpts.LocalChart = true - lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", "http.duration=2s"} - - err = lOpts.KubeRun() - assert.NoError(t, err) - - rel, err := lOpts.Releases.Last(lOpts.Group) - assert.NotNil(t, rel) - assert.Equal(t, 1, rel.Version) - assert.NoError(t, err) -} diff --git a/action/log.go b/action/log.go deleted file mode 100644 index b1c9cf800..000000000 --- a/action/log.go +++ /dev/null @@ -1,26 +0,0 @@ -package action - -import ( - "github.com/iter8-tools/iter8/driver" -) - -// LogOpts enables fetching logs from Kubernetes -type LogOpts struct { - // KubeDriver enables interaction with Kubernetes cluster - *driver.KubeDriver -} - -// NewLogOpts initializes and returns log opts -func NewLogOpts(kd *driver.KubeDriver) *LogOpts { - return &LogOpts{ - KubeDriver: kd, - } -} - -// KubeRun fetches logs from a Kubernetes experiment -func (lOpts *LogOpts) KubeRun() (string, error) { - if err := lOpts.KubeDriver.Init(); err != nil { - return "", err - } - return lOpts.GetExperimentLogs() -} diff --git a/action/log_test.go b/action/log_test.go deleted file mode 100644 index 528ba038f..000000000 
--- a/action/log_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package action - -import ( - "context" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestLog(t *testing.T) { - _ = os.Chdir(t.TempDir()) - var err error - - // fix lOpts - lOpts := NewLaunchOpts(driver.NewFakeKubeDriver(cli.New())) - lOpts.ChartName = base.CompletePath("../charts", "iter8") - lOpts.LocalChart = true - lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", "http.duration=2s"} - - err = lOpts.KubeRun() - assert.NoError(t, err) - - rel, err := lOpts.Releases.Last(lOpts.Group) - assert.NotNil(t, rel) - assert.Equal(t, 1, rel.Version) - assert.NoError(t, err) - - // fix lOpts - logOpts := NewLogOpts(lOpts.KubeDriver) - _, _ = logOpts.Clientset.CoreV1().Pods("default").Create(context.TODO(), &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default-1-job-8218s", - Namespace: "default", - Labels: map[string]string{ - "iter8.tools/group": "default", - }, - }, - }, metav1.CreateOptions{}) - - str, err := logOpts.KubeRun() - assert.NoError(t, err) - assert.Equal(t, "fake logs", str) -} diff --git a/action/run.go b/action/run.go index 17dc6b389..268d943f2 100644 --- a/action/run.go +++ b/action/run.go @@ -12,10 +12,6 @@ type RunOpts struct { // KubeDriver enables Kubernetes experiment run *driver.KubeDriver - - // ReuseResult configures Iter8 to reuse the experiment result instead of - // creating a new one for looping experiments. 
- ReuseResult bool } // NewRunOpts initializes and returns run opts diff --git a/action/run_test.go b/action/run_test.go index e6da28b44..c8796c761 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -66,19 +66,19 @@ func TestKubeRun(t *testing.T) { _ = os.Chdir(t.TempDir()) // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, driver.ExperimentPath) + base.CreateExperimentYaml(t, base.CompletePath("../testdata", base.ExperimentTemplateFile), url, base.ExperimentFile) // fix rOpts rOpts := NewRunOpts(driver.NewFakeKubeDriver(cli.New())) // read experiment from file created above - byteArray, _ := os.ReadFile(driver.ExperimentPath) + byteArray, _ := os.ReadFile(base.ExperimentFile) _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "default", }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, + StringData: map[string]string{base.ExperimentFile: string(byteArray)}, }, metav1.CreateOptions{}) err = rOpts.KubeRun() diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 619665d23..4534ef9cf 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -30,7 +30,7 @@ type collectGRPCInputs struct { Endpoints map[string]runner.Config `json:"endpoints" yaml:"endpoints"` } -// collectGRPCTask enables load testing of gRPC services. +// collectGRPCTask enables performance testing of gRPC services. 
type collectGRPCTask struct { // TaskMeta has fields common to all tasks TaskMeta diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index ad77293db..f65d85ea9 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -342,3 +342,121 @@ func TestRunCollectGRPCSingleEndpointMultipleCalls(t *testing.T) { assert.NotNil(t, ghzResult[unary]) assert.NotNil(t, ghzResult[unary2]) } + +func TestRunCollectGRPCWithWarmup(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + call := "helloworld.Greeter.SayHello" + + _ = os.Chdir(t.TempDir()) + callType := helloworld.Unary + gs, s, err := internal.StartServer(false) + if err != nil { + assert.FailNow(t, err.Error()) + } + t.Cleanup(s.Stop) + + // valid collect GRPC task... should succeed + warmupTrue := true + ct := &collectGRPCTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectGRPCTaskName), + }, + With: collectGRPCInputs{ + Config: runner.Config{ + Data: map[string]interface{}{"name": "bob"}, + Call: call, + Host: internal.LocalHostPort, + }, + Warmup: &warmupTrue, + }, + } + + log.Logger.Debug("dial timeout before defaulting... ", ct.With.DialTimeout.String()) + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, + } + exp.initResults(1) + err = ct.run(exp) + + log.Logger.Debug("dial timeout after defaulting... 
", ct.With.DialTimeout.String()) + + assert.NoError(t, err) + + count := gs.GetCount(callType) + assert.Equal(t, 200, count) + + // warmup option ensures that ghz results are not written to insights + assert.Nil(t, exp.Result.Insights) +} + +// Credit: Several of the tests in this file are based on +// https://github.com/bojand/ghz/blob/master/runner/run_test.go +func TestRunCollectGRPCWithIncorrectNumVersions(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + call := "helloworld.Greeter.SayHello" + + _ = os.Chdir(t.TempDir()) + callType := helloworld.Unary + gs, s, err := internal.StartServer(false) + if err != nil { + assert.FailNow(t, err.Error()) + } + t.Cleanup(s.Stop) + + // valid collect GRPC task... should succeed + ct := &collectGRPCTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectGRPCTaskName), + }, + With: collectGRPCInputs{ + Config: runner.Config{ + Data: map[string]interface{}{"name": "bob"}, + Call: call, + Host: internal.LocalHostPort, + }, + }, + } + + log.Logger.Debug("dial timeout before defaulting... ", ct.With.DialTimeout.String()) + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, + } + exp.initResults(1) + + exp.Result.Insights = &Insights{ + NumVersions: 2, // will cause grpc task to fail; grpc task expects insights been nil or numVersions set to 1 + } + + err = ct.run(exp) + + log.Logger.Debug("dial timeout after defaulting... 
", ct.With.DialTimeout.String()) + + // fail because of initInsightsWithNumVersions() + assert.Error(t, err) + + count := gs.GetCount(callType) + assert.Equal(t, 200, count) + + // error ensures that ghz results are not written to insights + assert.Nil(t, exp.Result.Insights.TaskData) +} diff --git a/base/collect_http.go b/base/collect_http.go index 02394fa35..f1679e1b7 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -46,14 +46,15 @@ type endpoint struct { URL string `json:"url" yaml:"url"` // AllowInitialErrors allows and doesn't abort on initial warmup errors AllowInitialErrors *bool `json:"allowInitialErrors,omitempty" yaml:"allowInitialErrors,omitempty"` - // Warmup indicates if task execution is for warmup purposes; if so the results will be ignored - Warmup *bool `json:"warmup,omitempty" yaml:"warmup,omitempty"` } // collectHTTPInputs contain the inputs to the metrics collection task to be executed. type collectHTTPInputs struct { endpoint + // Warmup indicates if task execution is for warmup purposes; if so the results will be ignored + Warmup *bool `json:"warmup,omitempty" yaml:"warmup,omitempty"` + // Endpoints is used to define multiple endpoints to test Endpoints map[string]endpoint `json:"endpoints" yaml:"endpoints"` } @@ -81,31 +82,7 @@ var ( defaultPercentiles = [...]float64{50.0, 75.0, 90.0, 95.0, 99.0, 99.9} ) -// errorCode checks if a given code is an error code -func (t *collectHTTPTask) errorCode(code int) bool { - // connection failure - if code == -1 { - return true - } - // HTTP errors - for _, lims := range t.With.ErrorRanges { - // if no lower limit (check upper) - if lims.Lower == nil && code <= *lims.Upper { - return true - } - // if no upper limit (check lower) - if lims.Upper == nil && code >= *lims.Lower { - return true - } - // if both limits are present (check both) - if lims.Upper != nil && lims.Lower != nil && code <= *lims.Upper && code >= *lims.Lower { - return true - } - } - return false -} - -// collectHTTPTask 
enables load testing of HTTP services. +// collectHTTPTask enables performance testing of HTTP services. type collectHTTPTask struct { // TaskMeta has fields common to all tasks TaskMeta @@ -152,7 +129,7 @@ func getFortioOptions(c endpoint) (*fhttp.HTTPRunnerOptions, error) { // basic runner fo := &fhttp.HTTPRunnerOptions{ RunnerOptions: periodic.RunnerOptions{ - RunType: "Iter8 load test", + RunType: "Iter8 HTTP performance test", QPS: float64(*c.QPS), NumThreads: *c.Connections, Percentiles: c.Percentiles, diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 1c2900809..b82762933 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -370,30 +370,122 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { assert.Equal(t, 0, len(httpResult)) } -func TestErrorCode(t *testing.T) { - task := collectHTTPTask{} - assert.True(t, task.errorCode(-1)) - - // if no lower limit (check upper) - upper := 10 - task.With.ErrorRanges = append(task.With.ErrorRanges, errorRange{ - Upper: &upper, - }) - assert.True(t, task.errorCode(5)) - - // if no upper limit (check lower) - task.With.ErrorRanges = []errorRange{} - lower := 1 - task.With.ErrorRanges = append(task.With.ErrorRanges, errorRange{ - Lower: &lower, - }) - assert.True(t, task.errorCode(5)) - - // if both limits are present (check both) - task.With.ErrorRanges = []errorRange{} - task.With.ErrorRanges = append(task.With.ErrorRanges, errorRange{ - Upper: &upper, - Lower: &lower, - }) - assert.True(t, task.errorCode(5)) +func TestRunCollectHTTPWithWarmup(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + mux, addr := fhttp.DynamicHTTPServer(false) + + // /foo/ handler + called := false // ensure that the /foo/ handler is called + handler := func(w http.ResponseWriter, r *http.Request) { + called = true + data, _ := io.ReadAll(r.Body) + testData, _ := 
os.ReadFile(CompletePath("../", "testdata/payload/ukpolice.json")) + + // assert that PayloadFile is working + assert.True(t, bytes.Equal(data, testData)) + + w.WriteHeader(200) + } + mux.HandleFunc("/"+foo, handler) + + url := fmt.Sprintf("http://localhost:%d/", addr.Port) + foo + + // valid collect HTTP task... should succeed + warmupTrue := true + ct := &collectHTTPTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectHTTPTaskName), + }, + With: collectHTTPInputs{ + endpoint: endpoint{ + Duration: StringPointer("1s"), + PayloadFile: StringPointer(CompletePath("../", "testdata/payload/ukpolice.json")), + Headers: map[string]string{}, + URL: url, + }, + Warmup: &warmupTrue, + }, + } + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, + } + exp.initResults(1) + err = ct.run(exp) + assert.NoError(t, err) + assert.True(t, called) // ensure that the /foo/ handler is called + + // warmup option ensures that Fortio results are not written to insights + assert.Nil(t, exp.Result.Insights) +} + +func TestRunCollectHTTPWithIncorrectNumVersions(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + mux, addr := fhttp.DynamicHTTPServer(false) + + // /foo/ handler + called := false // ensure that the /foo/ handler is called + handler := func(w http.ResponseWriter, r *http.Request) { + called = true + data, _ := io.ReadAll(r.Body) + testData, _ := os.ReadFile(CompletePath("../", "testdata/payload/ukpolice.json")) + + // assert that PayloadFile is working + assert.True(t, bytes.Equal(data, testData)) + + w.WriteHeader(200) + } + mux.HandleFunc("/"+foo, handler) + + url := fmt.Sprintf("http://localhost:%d/", addr.Port) + foo + + // valid collect HTTP task... 
should succeed + ct := &collectHTTPTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectHTTPTaskName), + }, + With: collectHTTPInputs{ + endpoint: endpoint{ + Duration: StringPointer("1s"), + PayloadFile: StringPointer(CompletePath("../", "testdata/payload/ukpolice.json")), + Headers: map[string]string{}, + URL: url, + }, + }, + } + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, + } + exp.initResults(1) + + exp.Result.Insights = &Insights{ + NumVersions: 2, // will cause http task to fail; grpc task expects insights been nil or numVersions set to 1 + } + + err = ct.run(exp) + assert.Error(t, err) // fail because of initInsightsWithNumVersions() + + assert.True(t, called) // ensure that the /foo/ handler is called + + // error ensures that Fortio results are not written to insights + assert.Nil(t, exp.Result.Insights.TaskData) } diff --git a/base/experiment_test.go b/base/experiment_test.go index 2c3b94d69..8be431d06 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -145,6 +145,32 @@ func TestFailExperiment(t *testing.T) { assert.False(t, exp.NoFailure()) } +func TestUnmarshalJSON(t *testing.T) { + tests := []struct { + specBytes string + errMessage string + }{ + { + specBytes: `[{"task":"ready"}]`, + }, + { + specBytes: `[{"task":"http"}]`, + }, + { + specBytes: `[{"task":"grpc"}]`, + }, + { + specBytes: `[{"task":"notify"}]`, + }, + } + + for _, test := range tests { + exp := ExperimentSpec{} + err := exp.UnmarshalJSON([]byte(test.specBytes)) + assert.NoError(t, err) + } +} + func TestUnmarshalJSONError(t *testing.T) { tests := []struct { specBytes string @@ -171,3 +197,18 @@ func TestUnmarshalJSONError(t *testing.T) { assert.EqualError(t, err, test.errMessage) } } + +func TestInitInsightsWithNumVersions(t *testing.T) { + r := ExperimentResult{ + Insights: &Insights{ + NumVersions: 1, + }, + } + + err := r.initInsightsWithNumVersions(1) 
+ assert.NoError(t, err) + + // Mismatching version numbers + err = r.initInsightsWithNumVersions(2) + assert.Error(t, err) +} diff --git a/base/kubedriver_test.go b/base/kubedriver_test.go new file mode 100644 index 000000000..8e97b9b91 --- /dev/null +++ b/base/kubedriver_test.go @@ -0,0 +1,15 @@ +package base + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "helm.sh/helm/v3/pkg/cli" +) + +func TestInitKube(t *testing.T) { + kubeDriver := NewKubeDriver(cli.New()) + err := kubeDriver.initKube() + + assert.NoError(t, err) +} diff --git a/base/notify.go b/base/notify.go index 00be970f3..b1ac1074d 100644 --- a/base/notify.go +++ b/base/notify.go @@ -45,9 +45,10 @@ type notifyTask struct { With notifyInputs `json:"with" yaml:"with"` } -// Report is the data that is given to the payload template -type Report struct { - // Timestamp is when the report was created +// Summary is the data that is given to the payload template +// Summary is a subset of the data contained in Experiment +type Summary struct { + // Timestamp is when the summary was created // For example: 2022-08-09 15:10:36.569745 -0400 EDT m=+12.599643189 TimeStamp string `json:"timeStamp" yaml:"timeStamp"` @@ -67,10 +68,10 @@ type Report struct { Experiment *Experiment `json:"experiment" yaml:"experiment"` } -// getReport gets the values for the payload template -func getReport(exp *Experiment) map[string]Report { - return map[string]Report{ - "Report": { +// getSummary gets the values for the payload template +func getSummary(exp *Experiment) map[string]Summary { + return map[string]Summary{ + "Summary": { TimeStamp: time.Now().String(), Completed: exp.Completed(), NoTaskFailures: exp.NoFailure(), @@ -87,7 +88,7 @@ func getReport(exp *Experiment) map[string]Report { } // getPayload fetches the payload template from the PayloadTemplateURL and -// executes it with values from getReport() +// executes it with values from getSummary() func (t *notifyTask) getPayload(exp *Experiment) (string, 
error) { if t.With.PayloadTemplateURL != "" { template, err := getTextTemplateFromURL(t.With.PayloadTemplateURL) @@ -95,7 +96,7 @@ func (t *notifyTask) getPayload(exp *Experiment) (string, error) { return "", err } - values := getReport(exp) + values := getSummary(exp) // get the metrics spec var buf bytes.Buffer diff --git a/base/notify_test.go b/base/notify_test.go index 761bb9072..ba9a59b59 100644 --- a/base/notify_test.go +++ b/base/notify_test.go @@ -55,9 +55,9 @@ func TestNotify(t *testing.T) { } type testNotification struct { - Text string `json:"text" yaml:"text"` - TextReport string `json:"textReport" yaml:"textReport"` - Report Report `json:"report" yaml:"report"` + Text string `json:"text" yaml:"text"` + TextSummary string `json:"textSummary" yaml:"textSummary"` + Summary Summary `json:"summary" yaml:"summary"` } // POST method and PayloadTemplateURL @@ -76,8 +76,8 @@ func TestNotifyWithPayload(t *testing.T) { httpmock.RegisterResponder(http.MethodGet, testNotifyURL+templatePath, httpmock.NewStringResponder(200, `{ "text": "hello world", - "textReport": "{{ regexReplaceAll "\"" (regexReplaceAll "\n" (.Report | toPrettyJson) "\\n") "\\\""}}", - "report": {{ .Report | toPrettyJson }} + "textSummary": "{{ regexReplaceAll "\"" (regexReplaceAll "\n" (.Summary | toPrettyJson) "\\n") "\\\""}}", + "summary": {{ .Summary | toPrettyJson }} }`)) // notify endpoint @@ -102,16 +102,16 @@ func TestNotifyWithPayload(t *testing.T) { // check text assert.Equal(t, notification.Text, "hello world") - // check textReport - var textReportReport Report - err = json.Unmarshal([]byte(notification.TextReport), &textReportReport) + // check summary + var summary Summary + err = json.Unmarshal([]byte(notification.TextSummary), &summary) if err != nil { assert.Fail(t, "could not JSON unmarshal textReport in notification") } - assert.Equal(t, textReportReport.NumTasks, 1) + assert.Equal(t, summary.NumTasks, 1) // check report - assert.Equal(t, notification.Report.NumTasks, 1) + 
assert.Equal(t, notification.Summary.NumTasks, 1) return httpmock.NewStringResponse(200, "success"), nil }, diff --git a/base/summarymetrics/doc.go b/base/summarymetrics/doc.go deleted file mode 100644 index 9560ba568..000000000 --- a/base/summarymetrics/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package summarymetrics provides a summary metric implementation -package summarymetrics diff --git a/base/summarymetrics/summary_metric.go b/base/summarymetrics/summary_metric.go deleted file mode 100644 index ac40af393..000000000 --- a/base/summarymetrics/summary_metric.go +++ /dev/null @@ -1,106 +0,0 @@ -package summarymetrics - -// summary_metric.go - defines a summary metric object. For space efficiency it is an array of 6 float64. - -import ( - "fmt" - "math" -) - -// SummaryMetric is summary metric type -type SummaryMetric [5]float64 - -const ( - countIdx = 0 - sumIdx = 1 - minIdx = 2 - maxIdx = 3 - ssIdx = 4 -) - -// EmptySummaryMetric returns metric object without any values added -func EmptySummaryMetric() *SummaryMetric { - m := SummaryMetric{ - 0, // Count - 0, // Sum - math.MaxFloat64, // Min - math.SmallestNonzeroFloat64, // Max - 0, // SumSquares - } - return &m -} - -// Count returns the number of observed values summarized by the metric -func (m *SummaryMetric) Count() uint32 { - return uint32(math.Round((*m)[countIdx])) -} - -// SetCount sets the number of observed values summarized by the metric -func (m *SummaryMetric) SetCount(v uint32) { - (*m)[countIdx] = float64(v) -} - -// Sum is the sum of the observed values -func (m *SummaryMetric) Sum() float64 { - return (*m)[sumIdx] -} - -// SetSum sets the sum of the observed values -func (m *SummaryMetric) SetSum(v float64) { - (*m)[sumIdx] = v -} - -// Min is the minimum of the observed values -func (m *SummaryMetric) Min() float64 { - return (*m)[minIdx] -} - -// SetMin sets the minimum of the observed values -func (m *SummaryMetric) SetMin(v float64) { - if v < m.Min() { - (*m)[minIdx] = v - } -} - -// Max 
is the maximum of the observed values -func (m *SummaryMetric) Max() float64 { - return (*m)[maxIdx] -} - -// SetMax sets the maximum of the observed values -func (m *SummaryMetric) SetMax(v float64) { - if v > m.Max() { - (*m)[maxIdx] = v - } -} - -// SumSquares is the sum of the squares of the observed values -func (m *SummaryMetric) SumSquares() float64 { - return (*m)[ssIdx] -} - -// SetSumSquares sets the sum of the squares of the observed values -func (m *SummaryMetric) SetSumSquares(v float64) { - (*m)[ssIdx] = v -} - -// Add adds an observed value to the metric -func (m *SummaryMetric) Add(value float64) *SummaryMetric { - m.SetCount(m.Count() + 1) - m.SetSum(m.Sum() + value) - m.SetMin(value) - m.SetMax(value) - m.SetSumSquares(m.SumSquares() + (value * value)) - return m -} - -// String returns a string representing the metric (not all fields are included) -func (m *SummaryMetric) String() string { - return fmt.Sprintf("[%d] %f, %f, %f, %f", - m.Count(), - m.Min(), - m.Max(), - m.Sum(), - m.SumSquares(), - ) -} diff --git a/base/summarymetrics/summary_metric_test.go b/base/summarymetrics/summary_metric_test.go deleted file mode 100644 index a1e143f44..000000000 --- a/base/summarymetrics/summary_metric_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package summarymetrics - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestVersionAndSummaryMetric(t *testing.T) { - m := EmptySummaryMetric() - - assert.NotNil(t, m) - // new metric is empty - assert.Equal(t, uint32(0), m.Count()) - assert.Equal(t, float64(0), m.Sum()) - - // add values - m.Add(float64(27)) - m.Add(float64(56)) - assert.Equal(t, uint32(2), m.Count()) - assert.Equal(t, float64(27), m.Min()) - assert.Equal(t, float64(56), m.Max()) - assert.Equal(t, float64(83), m.Sum()) - assert.Equal(t, float64(3865), m.SumSquares()) - assert.Equal(t, "[2] 27.000000, 56.000000, 83.000000, 3865.000000", m.String()) -} diff --git a/base/test_helpers.go b/base/test_helpers.go index 
111fedcd6..6356ed77d 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -14,6 +14,14 @@ import ( "github.com/stretchr/testify/assert" ) +const ( + // ExperimentFile is the name of the experiment file + ExperimentFile = "experiment.yaml" + + // ExperimentTemplateFile is the name of the template that will produce the experiment file + ExperimentTemplateFile = "experiment.tpl" +) + // mockDriver is a mock driver that can be used to run experiments type mockDriver struct { *Experiment diff --git a/charts/iter8/Chart.yaml b/charts/iter8/Chart.yaml index 820a395f7..62ed3f66c 100644 --- a/charts/iter8/Chart.yaml +++ b/charts/iter8/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: iter8 -version: 0.16.1 +version: 0.16.2 description: Iter8 experiment chart type: application home: https://iter8.tools diff --git a/charts/iter8/README.md b/charts/iter8/README.md index 1adae6301..117bb95b2 100644 --- a/charts/iter8/README.md +++ b/charts/iter8/README.md @@ -1,3 +1,3 @@ -# Iter8 experiment chart +# Iter8 test runner chart -This chart enables Iter8 experiments through the Iter8 CLI. +This chart enables Iter8 tests. 
\ No newline at end of file diff --git a/charts/iter8/templates/_k-job.tpl b/charts/iter8/templates/_k-job.tpl index 59b906ca8..cc338a23b 100644 --- a/charts/iter8/templates/_k-job.tpl +++ b/charts/iter8/templates/_k-job.tpl @@ -4,13 +4,13 @@ kind: Job metadata: name: {{ .Release.Name }}-{{ .Release.Revision }}-job annotations: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} iter8.tools/revision: {{ .Release.Revision | quote }} spec: template: metadata: labels: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} annotations: sidecar.istio.io/inject: "false" spec: diff --git a/charts/iter8/templates/_k-role.tpl b/charts/iter8/templates/_k-role.tpl index 1602f66fe..43c19e557 100644 --- a/charts/iter8/templates/_k-role.tpl +++ b/charts/iter8/templates/_k-role.tpl @@ -4,7 +4,7 @@ kind: Role metadata: name: {{ .Release.Name }} annotations: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} rules: - apiGroups: [""] resourceNames: [{{ .Release.Name | quote }}] @@ -20,7 +20,7 @@ metadata: name: {{ .Release.Name }}-ready namespace: {{ $namespace }} annotations: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} rules: {{- if .Values.ready.service }} - apiGroups: [""] diff --git a/charts/iter8/templates/_k-rolebinding.tpl b/charts/iter8/templates/_k-rolebinding.tpl index 5487d0505..d10dc9772 100644 --- a/charts/iter8/templates/_k-rolebinding.tpl +++ b/charts/iter8/templates/_k-rolebinding.tpl @@ -4,7 +4,7 @@ kind: RoleBinding metadata: name: {{ .Release.Name }} annotations: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} subjects: - kind: ServiceAccount name: {{ .Release.Name }}-iter8-sa @@ -23,7 +23,7 @@ metadata: name: {{ .Release.Name }}-ready namespace: {{ $namespace }} annotations: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} subjects: - kind: ServiceAccount name: {{ .Release.Name 
}}-iter8-sa diff --git a/charts/iter8/templates/_k-secret.tpl b/charts/iter8/templates/_k-secret.tpl index 8080f821c..3dbc18b16 100644 --- a/charts/iter8/templates/_k-secret.tpl +++ b/charts/iter8/templates/_k-secret.tpl @@ -4,7 +4,7 @@ kind: Secret metadata: name: {{ .Release.Name }} annotations: - iter8.tools/group: {{ .Release.Name }} + iter8.tools/test: {{ .Release.Name }} stringData: experiment.yaml: | {{ include "experiment" . | indent 4 }} diff --git a/charts/iter8/templates/_payload-github.tpl b/charts/iter8/templates/_payload-github.tpl deleted file mode 100644 index d92fab4f2..000000000 --- a/charts/iter8/templates/_payload-github.tpl +++ /dev/null @@ -1,4 +0,0 @@ -{ - "event_type": "iter8", - "client_payload": {{ .Report | toPrettyJson }} -} \ No newline at end of file diff --git a/charts/iter8/templates/_payload-slack.tpl b/charts/iter8/templates/_payload-slack.tpl deleted file mode 100644 index 1a42053fc..000000000 --- a/charts/iter8/templates/_payload-slack.tpl +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "Your Iter8 report is ready: {{ regexReplaceAll "\"" (regexReplaceAll "\n" (.Report | toPrettyJson) "\\n") "\\\""}}" -} diff --git a/charts/litmuschaos/Chart.yaml b/charts/litmuschaos/Chart.yaml deleted file mode 100644 index 3c2b805f7..000000000 --- a/charts/litmuschaos/Chart.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -appVersion: "2.13.0" -description: A Helm chart to perform Litmus Chaos experiments -name: litmuschaos -version: 0.0.4 -home: https://litmuschaos.io -sources: - - https://github.com/litmuschaos/litmus -keywords: - - chaos-engineering - - resiliency - - kubernetes - - pod -maintainers: - - name: Shubham Chaudhary - email: shubham.chaudhary@harness.io - - name: Srinivasan Parthasarathy - email: spartha@us.ibm.com -icon: https://raw.githubusercontent.com/litmuschaos/icons/master/litmus.png diff --git a/charts/litmuschaos/templates/_chaosengine.tpl b/charts/litmuschaos/templates/_chaosengine.tpl deleted file mode 100644 index 
fa62d11c1..000000000 --- a/charts/litmuschaos/templates/_chaosengine.tpl +++ /dev/null @@ -1,23 +0,0 @@ -{{- define "chaosengine" -}} -apiVersion: litmuschaos.io/v1alpha1 -kind: ChaosEngine -metadata: - name: {{ .Chart.Name }}-{{ .Release.Name }} -spec: - appinfo: - appns: "{{ .Release.Namespace }}" - applabel: "{{ required ".Values.applabel is required!" .Values.applabel }}" - appkind: "{{ .Values.appkind }}" - # It can be active/stop - engineState: 'active' - chaosServiceAccount: {{ .Chart.Name }}-{{ .Release.Name }} - experiments: - - name: pod-delete - spec: - components: - env: - - name: TOTAL_CHAOS_DURATION - value: {{ required ".Values.totalChaosDuration is required!" .Values.totalChaosDuration | quote }} - - name: CHAOS_INTERVAL - value: {{ required ".Values.chaosInterval is required!" .Values.chaosInterval | quote }} -{{- end }} diff --git a/charts/litmuschaos/templates/_chaosexperiment.tpl b/charts/litmuschaos/templates/_chaosexperiment.tpl deleted file mode 100644 index babf5fc79..000000000 --- a/charts/litmuschaos/templates/_chaosexperiment.tpl +++ /dev/null @@ -1,90 +0,0 @@ -{{- define "chaosexperiment" -}} -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Deletes a pod belonging to a deployment/statefulset/daemonset -kind: ChaosExperiment -metadata: - name: pod-delete - labels: - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: {{ .Chart.AppVersion }} -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "batch" - - "litmuschaos.io" - resources: - - "deployments" - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - 
"delete" - - "deletecollection" - image: "litmuschaos/go-runner:{{ .Chart.AppVersion }}" - imagePullPolicy: Always - args: - - -c - - ./experiments -name pod-delete - command: - - /bin/bash - env: - - - name: TOTAL_CHAOS_DURATION - value: '15' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - - name: FORCE - value: 'true' - - - name: CHAOS_INTERVAL - value: '5' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - - name: LIB - value: 'litmus' - - - name: TARGET_PODS - value: '' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - - labels: - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: {{ .Chart.AppVersion }} -{{- end }} diff --git a/charts/litmuschaos/templates/_rbac.tpl b/charts/litmuschaos/templates/_rbac.tpl deleted file mode 100644 index b94cf83eb..000000000 --- a/charts/litmuschaos/templates/_rbac.tpl +++ /dev/null @@ -1,45 +0,0 @@ -{{- define "rbac" -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Chart.Name }}-{{ .Release.Name }} - labels: - app.kubernetes.io/part-of: litmus ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ .Chart.Name }}-{{ .Release.Name }} - labels: - app.kubernetes.io/part-of: litmus -rules: -- apiGroups: [""] - resources: ["pods","events"] - verbs: ["create","list","get","patch","update","delete","deletecollection"] -- apiGroups: [""] - resources: ["pods/exec","pods/log","replicationcontrollers"] - verbs: ["create","list","get"] -- apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["create","list","get","delete","deletecollection"] -- apiGroups: ["apps"] - resources: ["deployments","statefulsets","daemonsets","replicasets"] - verbs: ["list","get"] -- apiGroups: ["litmuschaos.io"] - resources: ["chaosengines","chaosexperiments","chaosresults"] 
- verbs: ["create","list","get","patch","update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ .Chart.Name }}-{{ .Release.Name }} - labels: - app.kubernetes.io/part-of: litmus -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ .Chart.Name }}-{{ .Release.Name }} -subjects: -- kind: ServiceAccount - name: {{ .Chart.Name }}-{{ .Release.Name }} -{{- end }} diff --git a/charts/litmuschaos/templates/resources.yaml b/charts/litmuschaos/templates/resources.yaml deleted file mode 100644 index 837405a1e..000000000 --- a/charts/litmuschaos/templates/resources.yaml +++ /dev/null @@ -1,5 +0,0 @@ -{{ include "chaosengine" . }} ---- -{{ include "chaosexperiment" . }} ---- -{{ include "rbac" . }} diff --git a/charts/litmuschaos/values.yaml b/charts/litmuschaos/values.yaml deleted file mode 100644 index 08e3b8b0d..000000000 --- a/charts/litmuschaos/values.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Type of kubernetes workload -appkind: deployment - -# Total Chaos Duration: keep killing pods for an hour -totalChaosDuration: "3600" - -# Chaos Interval: kill a pod every 10 seconds -chaosInterval: "10" diff --git a/cmd/controllers.go b/cmd/controllers.go index ffce71f5b..9430ac6fc 100644 --- a/cmd/controllers.go +++ b/cmd/controllers.go @@ -19,7 +19,7 @@ import ( const controllersDesc = ` Start Iter8 controllers. 
- iter8 controllers + $ iter8 controllers ` // newControllersCmd creates the Iter8 controllers diff --git a/cmd/k.go b/cmd/k.go index 63450eba7..1f9cd6437 100644 --- a/cmd/k.go +++ b/cmd/k.go @@ -15,9 +15,9 @@ var kcmd = &cobra.Command{ Long: "Work with Kubernetes experiments", } -// addExperimentGroupFlag adds the experiment group flag -func addExperimentGroupFlag(cmd *cobra.Command, groupP *string) { - cmd.Flags().StringVarP(groupP, "group", "g", driver.DefaultExperimentGroup, "name of the experiment group") +// addTestFlag adds the test flag +func addTestFlag(cmd *cobra.Command, testP *string) { + cmd.Flags().StringVarP(testP, "test", "t", driver.DefaultTestName, "name of the test") } func init() { @@ -40,15 +40,6 @@ func init() { os.Exit(1) } - // add k delete - kcmd.AddCommand(newKDeleteCmd(kd, os.Stdout)) - - // add k launch - kcmd.AddCommand(newKLaunchCmd(kd, os.Stdout)) - - // add k log - kcmd.AddCommand(newKLogCmd(kd)) - // add k run kcmd.AddCommand(newKRunCmd(kd, os.Stdout)) } diff --git a/cmd/kdelete.go b/cmd/kdelete.go deleted file mode 100644 index a302a70a1..000000000 --- a/cmd/kdelete.go +++ /dev/null @@ -1,34 +0,0 @@ -package cmd - -import ( - "io" - - ia "github.com/iter8-tools/iter8/action" - "github.com/iter8-tools/iter8/driver" - "github.com/spf13/cobra" -) - -// kdeleteDesc is the description of the delete cmd -const kdeleteDesc = ` -Delete an experiment (group) in Kubernetes. - - iter8 k delete -` - -// newKDeleteCmd deletes an experiment group in Kubernetes. 
-func newKDeleteCmd(kd *driver.KubeDriver, out io.Writer) *cobra.Command { - actor := ia.NewDeleteOpts(kd) - - cmd := &cobra.Command{ - Use: "delete", - Short: "Delete an experiment (group) in Kubernetes", - Long: kdeleteDesc, - SilenceUsage: true, - RunE: func(_ *cobra.Command, _ []string) error { - return actor.KubeRun() - }, - } - addExperimentGroupFlag(cmd, &actor.Group) - actor.EnvSettings = settings - return cmd -} diff --git a/cmd/kdelete_test.go b/cmd/kdelete_test.go deleted file mode 100644 index a86475b3a..000000000 --- a/cmd/kdelete_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package cmd - -import ( - "fmt" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - id "github.com/iter8-tools/iter8/driver" -) - -func TestKDelete(t *testing.T) { - _ = os.Chdir(t.TempDir()) - tests := []cmdTestCase{ - // Launch, base case, values from CLI - { - name: "basic k launch", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http} --set http.url=https://httpbin.org/get --set http.duration=2s", base.CompletePath("../charts", "iter8")), - golden: base.CompletePath("../testdata", "output/klaunch.txt"), - }, - // Launch again, values from CLI - { - name: "launch again", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http} --set http.url=https://httpbin.org/get --set http.duration=2s", base.CompletePath("../charts", "iter8")), - }, - // Delete - { - name: "delete", - cmd: "k delete", - golden: base.CompletePath("../testdata", "output/kdelete.txt"), - }, - } - - // fake kube cluster - *kd = *id.NewFakeKubeDriver(settings) - runTestActionCmd(t, tests) -} diff --git a/cmd/klaunch.go b/cmd/klaunch.go deleted file mode 100644 index 8fe518957..000000000 --- a/cmd/klaunch.go +++ /dev/null @@ -1,110 +0,0 @@ -package cmd - -import ( - "errors" - "io" - - ia "github.com/iter8-tools/iter8/action" - "github.com/iter8-tools/iter8/driver" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -// klaunchDesc is the description of the k launch cmd -const 
klaunchDesc = ` -Launch an experiment inside a Kubernetes cluster. - - iter8 k launch --set "tasks={http}" --set http.url=https://httpbin.org/get - -Use the dry option to simulate a Kubernetes experiment. This creates the manifest.yaml file, but does not run the experiment, and does not deploy any experiment resource objects in the cluster. - - iter8 k launch \ - --set http.url=https://httpbin.org/get \ - --dry - -The launch command creates the 'charts' subdirectory under the current working directory, downloads the Iter8 experiment chart, and places it under 'charts'. This behavior can be controlled using various launch flags. - -This command supports setting values using the same mechanisms as in Helm. Please see https://helm.sh/docs/chart_template_guide/values_files/ for more detailed descriptions. In particular, this command supports the --set, --set-file, --set-string, and -f (--values) options all of which have the same behavior as in Helm. -` - -// newKLaunchCmd creates the Kubernetes launch command -func newKLaunchCmd(kd *driver.KubeDriver, out io.Writer) *cobra.Command { - actor := ia.NewLaunchOpts(kd) - - cmd := &cobra.Command{ - Use: "launch", - Short: "Launch an experiment inside a Kubernetes cluster", - Long: klaunchDesc, - SilenceUsage: true, - PreRunE: func(cmd *cobra.Command, _ []string) error { - return chartNameIsRequired(actor, cmd.Flags()) - }, - RunE: func(_ *cobra.Command, _ []string) error { - return actor.KubeRun() - }, - } - // flags specific to k launch - addExperimentGroupFlag(cmd, &actor.Group) - addDryRunForKFlag(cmd, &actor.DryRun) - actor.EnvSettings = settings - - // flags shared with launch - // addChartPathOptionsFlags(cmd, &actor.ChartPathOptions) - addChartNameFlag(cmd, &actor.ChartName) - addValueFlags(cmd.Flags(), &actor.Options) - addLocalChartFlag(cmd, &actor.LocalChart) - - return cmd -} - -// chartNameIsRequired makes chartName required if localChart is set -func chartNameIsRequired(lOpts *ia.LaunchOpts, flags 
*pflag.FlagSet) error { - if flags.Changed("localChart") && !flags.Changed("chartName") { - return errors.New("localChart specified; 'chartName' is required") - } - return nil -} - -// addDryRunForKFlag adds dry run flag to the k launch command -func addDryRunForKFlag(cmd *cobra.Command, dryRunPtr *bool) { - cmd.Flags().BoolVar(dryRunPtr, "dry", false, "simulate an experiment launch; outputs manifest.yaml file") - cmd.Flags().Lookup("dry").NoOptDefVal = "true" -} - -// addChartNameFlag to the command -func addChartNameFlag(cmd *cobra.Command, chartNamePtr *string) { - cmd.Flags().StringVarP(chartNamePtr, "chartName", "c", ia.DefaultChartName, "name of the experiment chart") -} - -// addLocalChartFlag adds the localChart flag to the launch command -func addLocalChartFlag(cmd *cobra.Command, localChartPtr *bool) { - cmd.Flags().BoolVar(localChartPtr, "localChart", false, "use local chart identified by --chartName") - cmd.Flags().Lookup("localChart").NoOptDefVal = "true" -} - -// addChartPathOptionsFlags adds flags related to Helm chart repository -// copied from -// https://github.com/helm/helm/blob/ce66412a723e4d89555dc67217607c6579ffcb21/cmd/helm/flags.go -// func addChartPathOptionsFlags(cmd *cobra.Command, c *action.ChartPathOptions) { -// cmd.Flags().StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). 
If this is not specified, the latest version is used") -// cmd.Flags().BoolVar(&c.Verify, "verify", false, "verify the package before using it") -// cmd.Flags().StringVar(&c.Keyring, "keyring", defaultKeyring(), "location of public keys used for verification") -// cmd.Flags().StringVar(&c.RepoURL, "repo", "https://iter8-tools.github.io/iter8", "chart repository url where to locate the requested chart") -// cmd.Flags().StringVar(&c.Username, "username", "", "chart repository username where to locate the requested chart") -// cmd.Flags().StringVar(&c.Password, "password", "", "chart repository password where to locate the requested chart") -// cmd.Flags().StringVar(&c.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file") -// cmd.Flags().StringVar(&c.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file") -// cmd.Flags().BoolVar(&c.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download") -// cmd.Flags().StringVar(&c.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle") -// cmd.Flags().BoolVar(&c.PassCredentialsAll, "pass-credentials", false, "pass credentials to all domains") -// } - -// // defaultKeyring returns the expanded path to the default keyring. 
-// // copied from -// // https://github.com/helm/helm/blob/ce66412a723e4d89555dc67217607c6579ffcb21/cmd/helm/dependency_build.go -// func defaultKeyring() string { -// if v, ok := os.LookupEnv("GNUPGHOME"); ok { -// return filepath.Join(v, "pubring.gpg") -// } -// return filepath.Join(homedir.HomeDir(), ".gnupg", "pubring.gpg") -// } diff --git a/cmd/klaunch_test.go b/cmd/klaunch_test.go deleted file mode 100644 index cf79084ab..000000000 --- a/cmd/klaunch_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package cmd - -import ( - "fmt" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - id "github.com/iter8-tools/iter8/driver" -) - -func TestKLaunch(t *testing.T) { - _ = os.Chdir(t.TempDir()) - tests := []cmdTestCase{ - // Launch, base case, values from CLI - { - name: "basic k launch", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http} --set http.url=https://httpbin.org/get --set http.duration=2s", base.CompletePath("../charts", "iter8")), - golden: base.CompletePath("../testdata", "output/klaunch.txt"), - }, - } - - // fake kube cluster - *kd = *id.NewFakeKubeDriver(settings) - - runTestActionCmd(t, tests) -} diff --git a/cmd/klog.go b/cmd/klog.go deleted file mode 100644 index 712f11b3d..000000000 --- a/cmd/klog.go +++ /dev/null @@ -1,39 +0,0 @@ -package cmd - -import ( - ia "github.com/iter8-tools/iter8/action" - "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/driver" - "github.com/spf13/cobra" -) - -// klogDesc is the description of the k log cmd -const klogDesc = ` -Fetch logs for a Kubernetes experiment. 
- - iter8 k log -` - -// newKLogCmd creates the Kubernetes log command -func newKLogCmd(kd *driver.KubeDriver) *cobra.Command { - actor := ia.NewLogOpts(kd) - - cmd := &cobra.Command{ - Use: "log", - Short: "Fetch logs for a Kubernetes experiment", - Long: klogDesc, - SilenceUsage: true, - RunE: func(_ *cobra.Command, _ []string) error { - var lg string - var err error - if lg, err = actor.KubeRun(); err != nil { - return err - } - log.Logger.WithIndentedTrace(lg).Info("experiment logs from Kubernetes cluster") - return nil - }, - } - addExperimentGroupFlag(cmd, &actor.Group) - actor.EnvSettings = settings - return cmd -} diff --git a/cmd/klog_test.go b/cmd/klog_test.go deleted file mode 100644 index 1af5c8dac..000000000 --- a/cmd/klog_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "os" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - id "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - - "github.com/iter8-tools/iter8/base" -) - -func TestKLog(t *testing.T) { - // define METRICS_SERVER_URL - metricsServerURL := "http://iter8.default:8080" - err := os.Setenv(base.MetricsServerURL, metricsServerURL) - assert.NoError(t, err) - - _ = os.Chdir(t.TempDir()) - tests := []cmdTestCase{ - // k launch - { - name: "k launch", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http} --set http.url=https://httpbin.org/get --set http.duration=2s", base.CompletePath("../charts", "iter8")), - golden: base.CompletePath("../testdata", "output/klaunch.txt"), - }, - // k assert - { - name: "k log", - cmd: "k log", - golden: base.CompletePath("../testdata", "output/klog.txt"), - }, - } - - // mock the environment - // fake kube cluster - *kd = *id.NewFakeKubeDriver(settings) - _, _ = kd.Clientset.CoreV1().Pods("default").Create(context.TODO(), &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default-1-job-8218s", - Namespace: "default", - Labels: 
map[string]string{ - "iter8.tools/group": "default", - }, - }, - }, metav1.CreateOptions{}) - - runTestActionCmd(t, tests) -} diff --git a/cmd/krun.go b/cmd/krun.go index 2d713eb76..1dc573545 100644 --- a/cmd/krun.go +++ b/cmd/krun.go @@ -10,11 +10,11 @@ import ( // krunDesc is the description of the k run command const krunDesc = ` -Run a Kubernetes experiment. This command reads an experiment specified in a secret and writes the result back to the secret. +Run a performance test on Kubernetes. This command reads a test specified in a secret and writes the result back to the secret. - $ iter8 k run --namespace {{ .Experiment.Namespace }} --group {{ .Experiment.group }} + $ iter8 k run --namespace {{ namespace }} --test {{ test name }} -This command is intended for use within the Iter8 Docker image that is used to execute Kubernetes experiments. +This command is intended for use within the Iter8 Docker image that is used to execute Kubernetes tests. ` // newKRunCmd creates the Kubernetes run command @@ -23,7 +23,7 @@ func newKRunCmd(kd *driver.KubeDriver, out io.Writer) *cobra.Command { actor.EnvSettings = settings cmd := &cobra.Command{ Use: "run", - Short: "Run a Kubernetes experiment", + Short: "Run a performance test on Kubernetes", Long: krunDesc, SilenceUsage: true, Hidden: true, @@ -31,6 +31,6 @@ func newKRunCmd(kd *driver.KubeDriver, out io.Writer) *cobra.Command { return actor.KubeRun() }, } - addExperimentGroupFlag(cmd, &actor.Group) + addTestFlag(cmd, &actor.Test) return cmd } diff --git a/cmd/krun_test.go b/cmd/krun_test.go index 6fb450b7a..2f5daf147 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -65,13 +65,13 @@ func TestKRun(t *testing.T) { _ = os.Chdir(t.TempDir()) // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) + base.CreateExperimentYaml(t, base.CompletePath("../testdata", base.ExperimentTemplateFile), url, base.ExperimentFile) tests := []cmdTestCase{ // k 
report { name: "k run", - cmd: "k run -g default --namespace default", + cmd: "k run -t default --namespace default", golden: base.CompletePath("../testdata", "output/krun.txt"), }, } @@ -80,13 +80,13 @@ func TestKRun(t *testing.T) { *kd = *id.NewFakeKubeDriver(settings) // and read it... - byteArray, _ := os.ReadFile(id.ExperimentPath) + byteArray, _ := os.ReadFile(base.ExperimentFile) _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "default", }, - StringData: map[string]string{id.ExperimentPath: string(byteArray)}, + StringData: map[string]string{base.ExperimentFile: string(byteArray)}, }, metav1.CreateOptions{}) runTestActionCmd(t, tests) diff --git a/cmd/root.go b/cmd/root.go index e0f89fee6..42cf85eae 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -7,9 +7,7 @@ import ( "github.com/iter8-tools/iter8/base/log" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/spf13/pflag" "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/cli/values" ) var ( @@ -47,16 +45,6 @@ func Execute() { cobra.CheckErr(rootCmd.Execute()) } -// addValueFlags adds flags that enable supplying values to the given command -// Credit: the following function is from Helm. 
Please see: -// https://github.com/helm/helm/blob/main/cmd/helm/flags.go -func addValueFlags(f *pflag.FlagSet, v *values.Options) { - f.StringSliceVarP(&v.ValueFiles, "values", "f", []string{}, "specify values in a YAML file or a URL (can specify multiple)") - f.StringArrayVar(&v.Values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&v.StringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)") - f.StringArrayVar(&v.FileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)") -} - // initialize Iter8 CLI root command and add all subcommands func init() { // disable completion command for now diff --git a/cmd/version.go b/cmd/version.go index e4988255d..24390c450 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -12,7 +12,7 @@ import ( var versionDesc = ` Print the version of Iter8 CLI. 
- iter8 version + $ iter8 version The output may look as follows: diff --git a/config.yaml b/config.yaml index e45086c2c..fb50a7429 100644 --- a/config.yaml +++ b/config.yaml @@ -1 +1,2 @@ +# Used by the helm/chart-releaser-action action in the releasecharts.yaml workflow skip-existing: true \ No newline at end of file diff --git a/driver/common.go b/driver/common.go index 7bfe4423e..0d85fdb04 100644 --- a/driver/common.go +++ b/driver/common.go @@ -7,10 +7,8 @@ import ( ) const ( - // ExperimentPath is the name of the experiment file - ExperimentPath = "experiment.yaml" - // DefaultExperimentGroup is the name of the default experiment chart - DefaultExperimentGroup = "default" + // DefaultTestName is the default name of the performance test + DefaultTestName = "default" ) // ExperimentFromBytes reads experiment from bytes diff --git a/driver/filedriver.go b/driver/filedriver.go deleted file mode 100644 index b3e3d7161..000000000 --- a/driver/filedriver.go +++ /dev/null @@ -1,53 +0,0 @@ -package driver - -import ( - "errors" - "fmt" - "os" - "path" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" -) - -// FileDriver enables reading and writing experiment spec and result files -type FileDriver struct { - // RunDir is the directory where the experiment.yaml file is to be found - RunDir string -} - -// Read the experiment -func (f *FileDriver) Read() (*base.Experiment, error) { - b, err := os.ReadFile(path.Join(f.RunDir, ExperimentPath)) - if err != nil { - log.Logger.WithStackTrace(err.Error()).Error("unable to read experiment") - return nil, errors.New("unable to read experiment") - } - return ExperimentFromBytes(b) -} - -// Write the experiment -func (f *FileDriver) Write(exp *base.Experiment) error { - // write to metrics server - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(base.MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment 
variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - - err := base.PutExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) - if err != nil { - errorMessage := "could not write experiment result to metrics service" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - - return nil -} - -// GetRevision is undefined for file drivers -func (f *FileDriver) GetRevision() int { - return 0 -} diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go deleted file mode 100644 index 695a5ca16..000000000 --- a/driver/filedriver_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package driver - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "testing" - - "fortio.org/fortio/fhttp" - "github.com/iter8-tools/iter8/base" - "github.com/stretchr/testify/assert" -) - -const ( - myName = "myName" - myNamespace = "myNamespace" -) - -func TestLocalRun(t *testing.T) { - // define METRICS_SERVER_URL - metricsServerURL := "http://iter8.default:8080" - err := os.Setenv(base.MetricsServerURL, metricsServerURL) - assert.NoError(t, err) - - // create and configure HTTP endpoint for testing - mux, addr := fhttp.DynamicHTTPServer(false) - url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) - var verifyHandlerCalled bool - mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) - - // mock metrics server - base.StartHTTPMock(t) - metricsServerCalled := false - base.MockMetricsServer(base.MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - ExperimentResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyExperimentResult := 
base.ExperimentResult{} - err = json.Unmarshal(body, &bodyExperimentResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - // no experiment failure - assert.False(t, bodyExperimentResult.Failure) - }, - }) - - _ = os.Chdir(t.TempDir()) - - // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata/drivertests", "experiment.tpl"), url, ExperimentPath) - - fd := FileDriver{ - RunDir: ".", - } - err = base.RunExperiment(&fd) - assert.NoError(t, err) - // sanity check -- handler was called - assert.True(t, verifyHandlerCalled) - assert.True(t, metricsServerCalled) -} - -func TestFileDriverReadError(t *testing.T) { - _ = os.Chdir(t.TempDir()) - fd := FileDriver{ - RunDir: ".", - } - exp, err := fd.Read() - assert.Error(t, err) - assert.Nil(t, exp) -} diff --git a/driver/kubedriver.go b/driver/kubedriver.go index d0db248bc..7e980b050 100644 --- a/driver/kubedriver.go +++ b/driver/kubedriver.go @@ -1,20 +1,13 @@ package driver import ( - "bytes" "context" "errors" "fmt" - "io" "os" - "os/signal" - "strings" - "syscall" "time" // Import to initialize client auth plugins. 
- "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" // auth import enables automated authentication to various hosted clouds _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -27,8 +20,6 @@ import ( "github.com/iter8-tools/iter8/base/log" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/cli/values" - "helm.sh/helm/v3/pkg/getter" "helm.sh/helm/v3/pkg/release" "k8s.io/client-go/kubernetes" @@ -56,9 +47,9 @@ type KubeDriver struct { Clientset kubernetes.Interface // Configuration enables Helm-based interaction with a Kubernetes cluster *action.Configuration - // Group is the experiment group - Group string - // revision is the revision of the experiment + // Test is the test name + Test string + // revision is the revision of the test revision int } @@ -66,7 +57,7 @@ type KubeDriver struct { func NewKubeDriver(s *cli.EnvSettings) *KubeDriver { kd := &KubeDriver{ EnvSettings: s, - Group: DefaultExperimentGroup, + Test: DefaultTestName, Configuration: nil, Clientset: nil, } @@ -139,15 +130,15 @@ func (kd *KubeDriver) Init() error { // getLastRelease fetches the last release of an Iter8 experiment func (kd *KubeDriver) getLastRelease() (*release.Release, error) { - log.Logger.Debugf("fetching latest revision for experiment group %v", kd.Group) + log.Logger.Debugf("fetching latest revision for experiment group %v", kd.Test) // getting last revision - rel, err := kd.Configuration.Releases.Last(kd.Group) + rel, err := kd.Configuration.Releases.Last(kd.Test) if err != nil { if helmerrors.Is(err, helmdriver.ErrReleaseNotFound) { log.Logger.Debugf("experiment release not found") return nil, nil } - e := fmt.Errorf("unable to get latest revision for experiment group %v", kd.Group) + e := fmt.Errorf("unable to get latest revision for experiment group %v", kd.Test) log.Logger.WithStackTrace(err.Error()).Error(e) return nil, e } @@ -156,7 +147,7 @@ func (kd *KubeDriver) getLastRelease() (*release.Release, error) { // getExperimentSecretName 
yields the name of the experiment secret func (kd *KubeDriver) getExperimentSecretName() string { - return fmt.Sprintf("%v", kd.Group) + return fmt.Sprintf("%v", kd.Test) } // getSecretWithRetry attempts to get a Kubernetes secret with retries @@ -199,9 +190,9 @@ func (kd *KubeDriver) Read() (*base.Experiment, error) { return nil, errors.New("unable to read experiment") } - b, ok := s.Data[ExperimentPath] + b, ok := s.Data[base.ExperimentFile] if !ok { - err = fmt.Errorf("unable to extract experiment; spec secret has no %v field", ExperimentPath) + err = fmt.Errorf("unable to extract experiment; spec secret has no %v field", base.ExperimentFile) log.Logger.Error(err) return nil, err } @@ -234,257 +225,3 @@ func (kd *KubeDriver) Write(exp *base.Experiment) error { func (kd *KubeDriver) GetRevision() int { return kd.revision } - -// writeManifest writes the Kubernetes experiment manifest to a local file -func writeManifest(rel *release.Release) error { - err := os.WriteFile(ManifestFile, []byte(rel.Manifest), 0600) - if err != nil { - log.Logger.WithStackTrace(err.Error()).Error("unable to write kubernetes manifest into ", ManifestFile) - return err - } - log.Logger.Info("wrote kubernetes manifest into ", ManifestFile) - return nil -} - -// Credit: the logic for this function is sourced from Helm -// https://github.com/helm/helm/blob/8ab18f7567cedffdfa5ba4d7f6abfb58efc313f8/cmd/helm/upgrade.go#L69 -// Upgrade a Kubernetes experiment to the next release -func (kd *KubeDriver) upgrade(chartPathOptions action.ChartPathOptions, chartName string, valueOpts values.Options, group string, dry bool) error { - client := action.NewUpgrade(kd.Configuration) - client.Namespace = kd.Namespace() - client.DryRun = dry - - // copy chartpathoptions - // client.ChartPathOptions.CaFile = chartPathOptions.CaFile - // client.ChartPathOptions.CertFile = chartPathOptions.CertFile - // client.ChartPathOptions.KeyFile = chartPathOptions.KeyFile - // 
client.ChartPathOptions.InsecureSkipTLSverify = chartPathOptions.InsecureSkipTLSverify - // client.ChartPathOptions.Keyring = chartPathOptions.Keyring - // client.ChartPathOptions.Password = chartPathOptions.Password - // client.ChartPathOptions.PassCredentialsAll = chartPathOptions.PassCredentialsAll - client.ChartPathOptions.RepoURL = chartPathOptions.RepoURL - // client.ChartPathOptions.Username = chartPathOptions.Username - // client.ChartPathOptions.Verify = chartPathOptions.Verify - client.ChartPathOptions.Version = chartPathOptions.Version - - cp, err := client.ChartPathOptions.LocateChart(chartName, kd.EnvSettings) - if err != nil { - log.Logger.Error(err) - return err - } - - chartRequested, vals, err := kd.getChartAndVals(cp, valueOpts) - if err != nil { - e := fmt.Errorf("unable to get chart and value for %v", cp) - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - // Create context and prepare the handle of SIGTERM - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - - // Set up channel on which to send signal notifications. - // We must use a buffered channel or risk missing the signal - // if we're not ready to receive when the signal is sent. 
- cSignal := make(chan os.Signal, 2) - signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM) - go func() { - <-cSignal - log.Logger.Warnf("experiment for group %s has been cancelled.\n", group) - cancel() - }() - - rel, err := client.RunWithContext(ctx, group, chartRequested, vals) - if err != nil { - e := fmt.Errorf("experiment launch failed") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - return kd.updateRevision(rel, dry) -} - -// install a Kubernetes experiment -// Credit: the logic for this function is sourced from Helm -// https://github.com/helm/helm/blob/8ab18f7567cedffdfa5ba4d7f6abfb58efc313f8/cmd/helm/install.go#L177 -func (kd *KubeDriver) install(chartPathOptions action.ChartPathOptions, chartName string, valueOpts values.Options, group string, dry bool) error { - - // buf := new(bytes.Buffer) - client := action.NewInstall(kd.Configuration) - client.Namespace = kd.Namespace() - client.RepoURL = chartPathOptions.RepoURL - client.DryRun = dry - client.ReleaseName = group - - // copy chartPathOptions to client - // client.ChartPathOptions.CaFile = chartPathOptions.CaFile - // client.ChartPathOptions.CertFile = chartPathOptions.CertFile - // client.ChartPathOptions.KeyFile = chartPathOptions.KeyFile - // client.ChartPathOptions.InsecureSkipTLSverify = chartPathOptions.InsecureSkipTLSverify - // client.ChartPathOptions.Keyring = chartPathOptions.Keyring - // client.ChartPathOptions.Password = chartPathOptions.Password - // client.ChartPathOptions.PassCredentialsAll = chartPathOptions.PassCredentialsAll - client.ChartPathOptions.RepoURL = chartPathOptions.RepoURL - // client.ChartPathOptions.Username = chartPathOptions.Username - // client.ChartPathOptions.Verify = chartPathOptions.Verify - client.ChartPathOptions.Version = chartPathOptions.Version - - cp, err := client.ChartPathOptions.LocateChart(chartName, kd.EnvSettings) - if err != nil { - log.Logger.Error(err) - return err - } - - chartRequested, vals, err := kd.getChartAndVals(cp, 
valueOpts) - if err != nil { - e := fmt.Errorf("unable to get chart and value for %v", cp) - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - // Create context and prepare the handle of SIGTERM - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - - // Set up channel on which to send signal notifications. - // We must use a buffered channel or risk missing the signal - // if we're not ready to receive when the signal is sent. - cSignal := make(chan os.Signal, 2) - signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM) - go func() { - <-cSignal - log.Logger.Warnf("experiment for group %s has been cancelled.\n", group) - cancel() - }() - - rel, err := client.RunWithContext(ctx, chartRequested, vals) - if err != nil { - e := fmt.Errorf("experiment launch failed") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - return kd.updateRevision(rel, dry) -} - -func (kd *KubeDriver) updateRevision(rel *release.Release, dry bool) error { - // upgrading revision info - kd.revision = rel.Version - - // write manifest if dry - if dry { - err := writeManifest(rel) - if err != nil { - return err - } - log.Logger.Info("dry run complete") - } else { - log.Logger.Info("experiment launched. 
Happy Iter8ing!") - } - return nil -} - -// Launch a Kubernetes experiment -func (kd *KubeDriver) Launch(chartPathOptions action.ChartPathOptions, chartName string, valueOpts values.Options, group string, dry bool) error { - if kd.revision <= 0 { - return kd.install(chartPathOptions, chartName, valueOpts, group, dry) - } - return kd.upgrade(chartPathOptions, chartName, valueOpts, group, dry) -} - -// Delete a Kubernetes experiment group -func (kd *KubeDriver) Delete() error { - client := action.NewUninstall(kd.Configuration) - _, err := client.Run(kd.Group) - if err != nil { - e := fmt.Errorf("deletion of experiment group %v failed", kd.Group) - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - log.Logger.Infof("experiment group %v deleted", kd.Group) - return nil -} - -// getChartAndVals gets experiment chart and its values -// Credit: the logic for this function is sourced from Helm -// https://github.com/helm/helm/blob/8ab18f7567cedffdfa5ba4d7f6abfb58efc313f8/cmd/helm/install.go#L177 -func (kd *KubeDriver) getChartAndVals(chartDir string, valueOpts values.Options) (*chart.Chart, map[string]interface{}, error) { - // form chart values - p := getter.All(kd.EnvSettings) - vals, err := valueOpts.MergeValues(p) - if err != nil { - e := fmt.Errorf("unable to merge chart values") - log.Logger.WithStackTrace(err.Error()).Error(e) - return nil, nil, e - } - - // attempt to load the chart - ch, err := loader.Load(chartDir) - if err != nil { - e := fmt.Errorf("unable to load chart") - log.Logger.WithStackTrace(err.Error()).Error(e) - return nil, nil, e - } - - if err := checkIfInstallable(ch); err != nil { - return nil, nil, err - } - - if ch.Metadata.Deprecated { - log.Logger.Warning("this chart is deprecated") - } - return ch, vals, nil -} - -// checkIfInstallable validates if a chart can be installed -// Only application chart type is installable -// Credit: this function is sourced from Helm -// 
https://github.com/helm/helm/blob/8ab18f7567cedffdfa5ba4d7f6abfb58efc313f8/cmd/helm/install.go#L270 -func checkIfInstallable(ch *chart.Chart) error { - switch ch.Metadata.Type { - case "", "application": - return nil - } - e := fmt.Errorf("%s charts are not installable", ch.Metadata.Type) - log.Logger.Error(e) - return e -} - -// GetExperimentLogs gets logs for a Kubernetes experiment -func (kd *KubeDriver) GetExperimentLogs() (string, error) { - podsClient := kd.Clientset.CoreV1().Pods(kd.Namespace()) - pods, err := podsClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("iter8.tools/group=%v", kd.Group), - }) - if err != nil { - e := errors.New("unable to get experiment pod(s)") - log.Logger.Error(e) - return "", e - } - lgs := make([]string, len(pods.Items)) - for i, p := range pods.Items { - req := podsClient.GetLogs(p.Name, &corev1.PodLogOptions{}) - podLogs, err := req.Stream(context.TODO()) - if err != nil { - e := fmt.Errorf("error in opening log stream: %e", err) - log.Logger.Error(e) - return "", e - } - - defer func() { - _ = podLogs.Close() - }() - - buf := new(bytes.Buffer) - _, err = io.Copy(buf, podLogs) - if err != nil { - e := fmt.Errorf("error in copy information from podLogs to buf: %e", err) - log.Logger.Error(e) - return "", e - } - str := buf.String() - lgs[i] = str - } - return strings.Join(lgs, "\n***\n"), nil -} diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index 93e1cecf4..e74bfb526 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -12,60 +12,15 @@ import ( "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/cli/values" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func TestKOps(t *testing.T) { - _ = os.Chdir(t.TempDir()) - kd := NewKubeDriver(cli.New()) // we will ignore this value - assert.NotNil(t, kd) - - kd = 
NewFakeKubeDriver(cli.New()) - err := kd.Init() - assert.NoError(t, err) - - // install - err = kd.install(action.ChartPathOptions{}, base.CompletePath("../", "charts/iter8"), values.Options{ - Values: []string{"tasks={http}", "http.url=https://httpbin.org/get"}, - }, kd.Group, false) - assert.NoError(t, err) - - rel, err := kd.Releases.Last(kd.Group) - assert.NoError(t, err) - assert.NotNil(t, rel) - assert.Equal(t, 1, rel.Version) - assert.Equal(t, 1, kd.revision) - - err = kd.Init() - assert.NoError(t, err) - - // upgrade - err = kd.upgrade(action.ChartPathOptions{}, base.CompletePath("../", "charts/iter8"), values.Options{ - Values: []string{"tasks={http}", "http.url=https://httpbin.org/get"}, - }, kd.Group, false) - assert.NoError(t, err) - - rel, err = kd.Releases.Last(kd.Group) - assert.NotNil(t, rel) - assert.Equal(t, 2, rel.Version) - assert.Equal(t, 2, kd.revision) - assert.NoError(t, err) - - err = kd.Init() - assert.NoError(t, err) - - // delete - err = kd.Delete() - assert.NoError(t, err) - - // delete - err = kd.Delete() - assert.Error(t, err) -} +const ( + myName = "myName" + myNamespace = "myNamespace" +) func TestKubeRun(t *testing.T) { // define METRICS_SERVER_URL @@ -110,18 +65,18 @@ func TestKubeRun(t *testing.T) { _ = os.Chdir(t.TempDir()) // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata/drivertests", "experiment.tpl"), url, ExperimentPath) + base.CreateExperimentYaml(t, base.CompletePath("../testdata/drivertests", "experiment.tpl"), url, base.ExperimentFile) kd := NewFakeKubeDriver(cli.New()) kd.revision = 1 - byteArray, _ := os.ReadFile(ExperimentPath) + byteArray, _ := os.ReadFile(base.ExperimentFile) _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "default", }, - StringData: map[string]string{ExperimentPath: string(byteArray)}, + StringData: map[string]string{base.ExperimentFile: string(byteArray)}, }, 
metav1.CreateOptions{}) err = base.RunExperiment(kd) @@ -130,47 +85,3 @@ func TestKubeRun(t *testing.T) { assert.True(t, verifyHandlerCalled) assert.True(t, metricsServerCalled) } - -func TestLogs(t *testing.T) { - _ = os.Chdir(t.TempDir()) - kd := NewFakeKubeDriver(cli.New()) - kd.revision = 1 - - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/drivertests", ExperimentPath)) - _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - _, _ = kd.Clientset.CoreV1().Pods("default").Create(context.TODO(), &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default-1-job-1831a", - Namespace: "default", - Labels: map[string]string{ - "iter8.tools/group": "default", - }, - }, - }, metav1.CreateOptions{}) - - // check logs - str, err := kd.GetExperimentLogs() - assert.NoError(t, err) - assert.Equal(t, "fake logs", str) -} - -func TestDryInstall(t *testing.T) { - _ = os.Chdir(t.TempDir()) - kd := NewFakeKubeDriver(cli.New()) - - err := kd.Launch(action.ChartPathOptions{}, base.CompletePath("../", "charts/iter8"), values.Options{ - ValueFiles: []string{}, - StringValues: []string{}, - Values: []string{"tasks={http}", "http.url=https://localhost:12345"}, - FileValues: []string{}, - }, "default", true) - - assert.NoError(t, err) - assert.FileExists(t, ManifestFile) -} diff --git a/driver/test_helpers.go b/driver/test_helpers.go index 04bf71f49..6e4f8e44f 100644 --- a/driver/test_helpers.go +++ b/driver/test_helpers.go @@ -64,7 +64,7 @@ func initHelmFake(kd *KubeDriver) { func NewFakeKubeDriver(s *cli.EnvSettings, objects ...runtime.Object) *KubeDriver { kd := &KubeDriver{ EnvSettings: s, - Group: DefaultExperimentGroup, + Test: DefaultTestName, } initKubeFake(kd, objects...) 
initHelmFake(kd) diff --git a/go.mod b/go.mod index 825ed4b11..51a56c05c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/iter8-tools/iter8 -go 1.19 +go 1.21 retract ( // Published v1 too early diff --git a/templates/notify/_payload-github.tpl b/templates/notify/_payload-github.tpl index d92fab4f2..090ad394f 100644 --- a/templates/notify/_payload-github.tpl +++ b/templates/notify/_payload-github.tpl @@ -1,4 +1,4 @@ { "event_type": "iter8", - "client_payload": {{ .Report | toPrettyJson }} + "client_payload": {{ .Summary | toPrettyJson }} } \ No newline at end of file diff --git a/templates/notify/_payload-slack.tpl b/templates/notify/_payload-slack.tpl index 79743c910..52baf0fe7 100644 --- a/templates/notify/_payload-slack.tpl +++ b/templates/notify/_payload-slack.tpl @@ -1,3 +1,3 @@ { - "text": "Your Iter8 report is ready: {{ regexReplaceAll "\"" (regexReplaceAll "\n" (.Report | toPrettyJson) "\\n") "\\\""}}" + "text": "Your Iter8 report is ready: {{ regexReplaceAll "\"" (regexReplaceAll "\n" (.Summary | toPrettyJson) "\\n") "\\\""}}" } \ No newline at end of file diff --git a/testdata/controllers/blue-green-http-kserve/bumpweights.sh b/testdata/controllers/blue-green-http-kserve/bumpweights.sh deleted file mode 100755 index c88286034..000000000 --- a/testdata/controllers/blue-green-http-kserve/bumpweights.sh +++ /dev/null @@ -1,4 +0,0 @@ -echo "kubectl annotate --overwrite isvc wisdom-primary iter8.tools/weight='20'" -echo "kubectl annotate --overwrite isvc wisdom-candidate iter8.tools/weight='80'" -kubectl annotate --overwrite isvc wisdom-primary iter8.tools/weight='20' -kubectl annotate --overwrite isvc wisdom-candidate iter8.tools/weight='80' diff --git a/testdata/controllers/blue-green-http-kserve/cleanup.sh b/testdata/controllers/blue-green-http-kserve/cleanup.sh deleted file mode 100755 index 7e803a8d1..000000000 --- a/testdata/controllers/blue-green-http-kserve/cleanup.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -kubectl delete isvc 
wisdom-primary wisdom-candidate -kubectl delete deploy sleep -kubectl delete svc wisdom -kubectl delete vs wisdom -kubectl delete cm wisdom wisdom-input diff --git a/testdata/controllers/blue-green-http-kserve/execintosleep.sh b/testdata/controllers/blue-green-http-kserve/execintosleep.sh deleted file mode 100755 index 1391b7697..000000000 --- a/testdata/controllers/blue-green-http-kserve/execintosleep.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -# First, get $SLEEP_POD -SLEEP_POD=$(kubectl get pod --sort-by={metadata.creationTimestamp} -l app=sleep -o jsonpath={.items..metadata.name} | rev | cut -d' ' -f 1 | rev) -# Second, exec into it -kubectl exec --stdin --tty "${SLEEP_POD}" -c sleep -- /bin/sh -# Third, cd wisdom && source query.sh in order to query wisdom \ No newline at end of file diff --git a/testdata/controllers/blue-green-http-kserve/initialize.sh b/testdata/controllers/blue-green-http-kserve/initialize.sh deleted file mode 100755 index b82424580..000000000 --- a/testdata/controllers/blue-green-http-kserve/initialize.sh +++ /dev/null @@ -1,130 +0,0 @@ -cat < parameters = 4; - - // The tensor contents using a data-type format. This field must - // not be specified if "raw" tensor contents are being used for - // the inference request. - InferTensorContents contents = 5; - } - - // An output tensor requested for an inference request. - message InferRequestedOutputTensor - { - // The tensor name. - string name = 1; - - // Optional requested output tensor parameters. - map parameters = 2; - } - - // The name of the model to use for inferencing. - string model_name = 1; - - // The version of the model to use for inference. If not given the - // server will choose a version based on the model and internal policy. - string model_version = 2; - - // Optional identifier for the request. If specified will be - // returned in the response. - string id = 3; - - // Optional inference parameters. - map parameters = 4; - - // The input tensors for the inference. 
- repeated InferInputTensor inputs = 5; - - // The requested output tensors for the inference. Optional, if not - // specified all outputs produced by the model will be returned. - repeated InferRequestedOutputTensor outputs = 6; - - // The data contained in an input tensor can be represented in "raw" - // bytes form or in the repeated type that matches the tensor's data - // type. To use the raw representation 'raw_input_contents' must be - // initialized with data for each tensor in the same order as - // 'inputs'. For each tensor, the size of this content must match - // what is expected by the tensor's shape and data type. The raw - // data must be the flattened, one-dimensional, row-major order of - // the tensor elements without any stride or padding between the - // elements. Note that the FP16 and BF16 data types must be represented as - // raw content as there is no specific data type for a 16-bit float type. - // - // If this field is specified then InferInputTensor::contents must - // not be specified for any input tensor. - repeated bytes raw_input_contents = 7; - } - - message ModelInferResponse - { - // An output tensor returned for an inference request. - message InferOutputTensor - { - // The tensor name. - string name = 1; - - // The tensor data type. - string datatype = 2; - - // The tensor shape. - repeated int64 shape = 3; - - // Optional output tensor parameters. - map parameters = 4; - - // The tensor contents using a data-type format. This field must - // not be specified if "raw" tensor contents are being used for - // the inference response. - InferTensorContents contents = 5; - } - - // The name of the model used for inference. - string model_name = 1; - - // The version of the model used for inference. - string model_version = 2; - - // The id of the inference request if one was specified. - string id = 3; - - // Optional inference response parameters. - map parameters = 4; - - // The output tensors holding inference results. 
- repeated InferOutputTensor outputs = 5; - - // The data contained in an output tensor can be represented in - // "raw" bytes form or in the repeated type that matches the - // tensor's data type. To use the raw representation 'raw_output_contents' - // must be initialized with data for each tensor in the same order as - // 'outputs'. For each tensor, the size of this content must match - // what is expected by the tensor's shape and data type. The raw - // data must be the flattened, one-dimensional, row-major order of - // the tensor elements without any stride or padding between the - // elements. Note that the FP16 and BF16 data types must be represented as - // raw content as there is no specific data type for a 16-bit float type. - // - // If this field is specified then InferOutputTensor::contents must - // not be specified for any output tensor. - repeated bytes raw_output_contents = 6; - } - - // An inference parameter value. The Parameters message describes a - // “name”/”value” pair, where the “name” is the name of the parameter - // and the “value” is a boolean, integer, or string corresponding to - // the parameter. - message InferParameter - { - // The parameter value can be a string, an int64, a boolean - // or a message specific to a predefined parameter. - oneof parameter_choice - { - // A boolean parameter value. - bool bool_param = 1; - - // An int64 parameter value. - int64 int64_param = 2; - - // A string parameter value. - string string_param = 3; - } - } - - // The data contained in a tensor represented by the repeated type - // that matches the tensor's data type. Protobuf oneof is not used - // because oneofs cannot contain repeated fields. - message InferTensorContents - { - // Representation for BOOL data type. The size must match what is - // expected by the tensor's shape. The contents must be the flattened, - // one-dimensional, row-major order of the tensor elements. 
- repeated bool bool_contents = 1; - - // Representation for INT8, INT16, and INT32 data types. The size - // must match what is expected by the tensor's shape. The contents - // must be the flattened, one-dimensional, row-major order of the - // tensor elements. - repeated int32 int_contents = 2; - - // Representation for INT64 data types. The size must match what - // is expected by the tensor's shape. The contents must be the - // flattened, one-dimensional, row-major order of the tensor elements. - repeated int64 int64_contents = 3; - - // Representation for UINT8, UINT16, and UINT32 data types. The size - // must match what is expected by the tensor's shape. The contents - // must be the flattened, one-dimensional, row-major order of the - // tensor elements. - repeated uint32 uint_contents = 4; - - // Representation for UINT64 data types. The size must match what - // is expected by the tensor's shape. The contents must be the - // flattened, one-dimensional, row-major order of the tensor elements. - repeated uint64 uint64_contents = 5; - - // Representation for FP32 data type. The size must match what is - // expected by the tensor's shape. The contents must be the flattened, - // one-dimensional, row-major order of the tensor elements. - repeated float fp32_contents = 6; - - // Representation for FP64 data type. The size must match what is - // expected by the tensor's shape. The contents must be the flattened, - // one-dimensional, row-major order of the tensor elements. - repeated double fp64_contents = 7; - - // Representation for BYTES data type. The size must match what is - // expected by the tensor's shape. The contents must be the flattened, - // one-dimensional, row-major order of the tensor elements. 
- repeated bytes bytes_contents = 8; - } - - input.json: | - { - "model_name":"wisdom", - "inputs": [ - { - "name": "input-0", - "shape": [2, 4], - "datatype": "FP32", - "contents": { - "fp32_contents": [6.8, 2.8, 4.8, 1.4, 6.0, 3.4, 4.5, 1.6] - } - } - ] - } - - query.sh: | - echo "cat input.json | grpcurl -plaintext -proto kserve.proto -d @ wisdom.default.svc.cluster.local:80 inference.GRPCInferenceService.ModelInfer" - cat input.json | grpcurl -plaintext -proto kserve.proto -d @ wisdom.default.svc.cluster.local:80 inference.GRPCInferenceService.ModelInfer -EOF diff --git a/testdata/controllers/mirror-grpc-kserve/steps.sh b/testdata/controllers/mirror-grpc-kserve/steps.sh deleted file mode 100644 index 034a9d83e..000000000 --- a/testdata/controllers/mirror-grpc-kserve/steps.sh +++ /dev/null @@ -1,16 +0,0 @@ -# initialize primary v1 -./initialize.sh -# query -./sleep.sh -# in a new terminal -./execintosleep.sh -# inside the sleep pod -cd wisdom -source query.sh - -# candidate v2 -./v2-candidate.sh - -# promote v2 -./promote-v2.sh -kubectl delete ns candidate diff --git a/testdata/controllers/mirror-grpc-kserve/v2-candidate.sh b/testdata/controllers/mirror-grpc-kserve/v2-candidate.sh deleted file mode 100755 index 38df294f7..000000000 --- a/testdata/controllers/mirror-grpc-kserve/v2-candidate.sh +++ /dev/null @@ -1,29 +0,0 @@ -cat < --output json | jq -r '.[].guid' - # https://cloud.ibm.com/docs/monitoring?topic=monitoring-mon-curl - IBMInstanceID: test-guid -provider: test-ce -method: GET -# Inputs for the template: -# ibm_codeengine_application_name string -# ibm_codeengine_gateway_instance string -# ibm_codeengine_namespace string -# ibm_codeengine_project_name string -# ibm_codeengine_revision_name string -# ibm_codeengine_status string -# ibm_ctype string -# ibm_location string -# ibm_scope string -# ibm_service_instance string -# ibm_service_name string -# -# Inputs for the metrics (output of template): -# ibm_codeengine_revision_name string -# startingTime 
string -# -# Note: elapsedTimeSeconds is produced by Iter8 -metrics: -- name: request-count - type: counter - description: | - Number of requests - params: - - name: query - value: | - sum(last_over_time(ibm_codeengine_application_requests_total{ - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[{{.elapsedTimeSeconds}}s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-count - type: counter - description: | - Number of non-successful requests - params: - - name: query - value: | - sum(last_over_time(ibm_codeengine_application_requests_total{ - ibm_codeengine_status!="200", - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[{{.elapsedTimeSeconds}}s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-rate - type: gauge - description: | - Percentage of non-successful requests - params: - - name: query - value: | - sum(last_over_time(ibm_codeengine_application_requests_total{ - ibm_codeengine_status!="200", - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[{{.elapsedTimeSeconds}}s])) or on() vector(0)/sum(last_over_time(ibm_codeengine_application_requests_total{ - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[{{.elapsedTimeSeconds}}s])) or on() vector(0) - jqExpression: .data.result.[0].value.[1] \ No newline at end of file diff --git a/testdata/metrics/test-request-body.metrics.yaml b/testdata/metrics/test-request-body.metrics.yaml deleted file mode 100644 index ac5dd1292..000000000 --- a/testdata/metrics/test-request-body.metrics.yaml +++ /dev/null @@ -1,16 +0,0 @@ -url: test-database.com/prometheus/api/v1/query -provider: test-request-body -method: GET -# Note: elapsedTimeSeconds is 
produced by Iter8 -metrics: -- name: request-count - type: counter - description: | - Number of requests - body: | - example request body - params: - - name: query - value: | - example query parameter - jqExpression: .data.result[0].value[1] | tonumber \ No newline at end of file