feat: Add kube api for k6 scaffolding (#5494)
* make tfsimple_pipeline test create random models underneath

* update alpine base image and install xk6

* make namespace configurable in shell script

* add xk6 to .gitignore

* add xk6 install as make target

* adjust wrapper to use k6 with xk6

* add note in readme regarding xk6

* add k8s test script scenario

* adjust namespace variable

* ability to set namespace in kustomize

* update model script

* add newline in file

* add NAMESPACE as envar

* wire up namespace

* parse pod namespace from envar

* use NAMESPACE env var

* add service account and remove unnecessary secret

* have main scenario back as default

* review comments
sakoush authored Apr 9, 2024
Parent: 0b9ee7d, commit: bcf72b7
Showing 9 changed files with 139 additions and 32 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -150,3 +150,6 @@ event.json
# Go workspace files
go.work
go.work.sum

# xk6 binary
tests/k6/k6
51 changes: 37 additions & 14 deletions samples/stress-tests.sh
@@ -101,7 +101,7 @@ function status() {
# The following functions are the `task` options that can be called by the user.

function tfsimple() {
echo $i
echo ${1}
sed 's/name: tfsimple1/name: tfsimple'"$1"'/g' models/tfsimple1.yaml > /tmp/models/tfsimple${1}.yaml
load model /tmp/models/tfsimple${1}.yaml
status model tfsimple${1}
@@ -111,7 +111,7 @@ function tfsimple() {
}

function iris() {
echo $i
echo ${1}
sed 's/name: iris/name: iris'"$1"'/g' models/iris-v1.yaml > /tmp/models/iris${1}.yaml
load model /tmp/models/iris${1}.yaml
status model iris${1}
@@ -121,25 +121,48 @@ function iris() {
}

function tfsimple_pipeline() {
echo $i
sed 's/name: tfsimples/name: tfsimples'"$1"'/g' pipelines/tfsimples.yaml > /tmp/pipelines/tfsimples${1}.yaml
load model ./models/tfsimple1.yaml
load model ./models/tfsimple2.yaml
status model tfsimple1
status model tfsimple2
echo ${1}
model_1=$((1 + $RANDOM % 20))
model_2=$((1 + $RANDOM % 20))
echo '''
apiVersion: mlops.seldon.io/v1alpha1
kind: Pipeline
metadata:
  name: tfsimples'${1}'
spec:
  steps:
    - name: tfsimple'${model_1}'
    - name: tfsimple'${model_2}'
      inputs:
        - tfsimple'${model_1}'
      tensorMap:
        tfsimple'${model_1}'.outputs.OUTPUT0: INPUT0
        tfsimple'${model_1}'.outputs.OUTPUT1: INPUT1
  output:
    steps:
      - tfsimple'${model_2}'
''' > /tmp/pipelines/tfsimples${1}.yaml

sed 's/name: tfsimple1/name: tfsimple'"${model_1}"'/g' models/tfsimple1.yaml > /tmp/models/tfsimple${model_1}.yaml
load model /tmp/models/tfsimple${model_1}.yaml

sed 's/name: tfsimple1/name: tfsimple'"${model_2}"'/g' models/tfsimple1.yaml > /tmp/models/tfsimple${model_2}.yaml
load model /tmp/models/tfsimple${model_2}.yaml

status model tfsimple${model_1}
status model tfsimple${model_2}

load pipeline /tmp/pipelines/tfsimples${1}.yaml
status pipeline tfsimples${1}
seldon pipeline infer tfsimples${1} '{"inputs":[{"name":"INPUT0","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]}]}'
seldon pipeline infer tfsimples${1} --inference-mode grpc '{"model_name":"simple","inputs":[{"name":"INPUT0","contents":{"int_contents":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]},"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","contents":{"int_contents":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]},"datatype":"INT32","shape":[1,16]}]}'
unload pipeline tfsimples${1} /tmp/pipelines/tfsimples${1}.yaml
# we cant unload the models here as they are used by other pipelines
# TODO: create sub models for each pipeline?
# unload model tfsimple1 ./models/tfsimple1.yaml
# unload model tfsimple2 ./models/tfsimple2.yaml
unload model tfsimple${model_1} /tmp/models/tfsimple${model_1}.yaml
unload model tfsimple${model_2} /tmp/models/tfsimple${model_2}.yaml
}

function tfsimple_join_pipeline() {
echo $i
echo ${1}
sed 's/name: join/name: join'"$1"'/g' pipelines/tfsimples-join.yaml > /tmp/pipelines/tfsimples-join${1}.yaml
load model ./models/tfsimple1.yaml
load model ./models/tfsimple2.yaml
@@ -152,7 +175,7 @@ function tfsimple_join_pipeline() {
seldon pipeline infer join${1} '{"inputs":[{"name":"INPUT0","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]}]}'
seldon pipeline infer join${1} --inference-mode grpc '{"model_name":"simple","inputs":[{"name":"INPUT0","contents":{"int_contents":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]},"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","contents":{"int_contents":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]},"datatype":"INT32","shape":[1,16]}]}'
unload pipeline join${1} /tmp/pipelines/tfsimples-join${1}.yaml
# we cant unload the models here as they are used by other pipelines
# we cant unload the models here as they are used by other pipelines ?
# TODO: create sub models for each pipeline?
# unload model tfsimple1 ./models/tfsimple1.yaml
# unload model tfsimple2 ./models/tfsimple2.yaml
8 changes: 5 additions & 3 deletions tests/k6/Dockerfile.k6
@@ -1,4 +1,4 @@
FROM alpine:3.6 AS builder
FROM alpine:3.19.1 AS builder

RUN apk add --update \
python3 \
@@ -12,8 +12,7 @@ FROM grafana/k6

USER root


RUN apk add --update python3 uuidgen sed
RUN apk add --update python3 uuidgen sed go

COPY --from=builder /root/google-cloud-sdk /root/google-cloud-sdk

@@ -29,6 +28,9 @@ ADD apis /home/apis/

WORKDIR k6

RUN go install go.k6.io/xk6/cmd/xk6@latest
RUN ~/go/bin/xk6 build --with github.com/grafana/xk6-kubernetes

RUN mkdir results

RUN chmod +x k6wrapper.sh
26 changes: 16 additions & 10 deletions tests/k6/Makefile
@@ -1,11 +1,12 @@
CUSTOM_IMAGE_TAG ?= latest
DOCKERHUB_USERNAME ?= seldonio
NAMESPACE ?= seldon-mesh

IMG ?= ${DOCKERHUB_USERNAME}/seldon-k6:${CUSTOM_IMAGE_TAG}
GIT_COMMIT := $(shell git rev-parse HEAD)
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)

CMD = kubectl describe pod ${POD} -n seldon-mesh | grep IP: -m 1 | cut -d' ' -f2- | xargs
CMD = kubectl describe pod ${POD} -n ${NAMESPACE} | grep IP: -m 1 | cut -d' ' -f2- | xargs

POD = seldon-scheduler
SCHEDULER_ENDPOINT := $(shell ${CMD})
@@ -40,53 +41,58 @@ docker-run:
build-push: docker-build docker-push

deploy-envoy-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/envoy | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" ENVOY_ENDPOINT="${ENVOY_ENDPOINT}" envsubst | kubectl apply -f -

undeploy-envoy-test:
kustomize build configs/k8s/overlays/envoy | kubectl delete -f -

deploy-rproxy-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/rproxy | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" RPROXY_ENDPOINT="${RPROXY_TRITON_ENDPOINT}" envsubst | kubectl apply -f -

deploy-rproxy-mlserver-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/rproxy | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" RPROXY_ENDPOINT="${RPROXY_MLSERVER_ENDPOINT}" envsubst | kubectl apply -f -

undeploy-rproxy-test:
kustomize build configs/k8s/overlays/rproxy | kubectl delete -f -

deploy-server-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/server | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" RPROXY_ENDPOINT="${RPROXY_TRITON_ENDPOINT}" envsubst | kubectl apply -f -

deploy-server-mlserver-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/server | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" RPROXY_ENDPOINT="${RPROXY_MLSERVER_ENDPOINT}" envsubst | kubectl apply -f -

undeploy-server-test:
kustomize build configs/k8s/overlays/server | kubectl delete -f -

deploy-kmodel-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/kmodel | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" PIPELINE_ENDPOINT="${PIPELINE_ENDPOINT}" envsubst | kubectl apply -f -

undeploy-kmodel-test:
kustomize build configs/k8s/overlays/kmodel | kubectl delete -f -

deploy-kpipeline-test:
cd configs/k8s/base && kustomize edit set image k6=${IMG}
cd configs/k8s/base && kustomize edit set image k6=${IMG} && kustomize edit set namespace ${NAMESPACE}
kustomize build configs/k8s/overlays/kpipeline | SCHEDULER_ENDPOINT="${SCHEDULER_ENDPOINT}" PIPELINE_ENDPOINT="${PIPELINE_ENDPOINT}" envsubst | kubectl apply -f -

undeploy-kpipeline-test:
kustomize build configs/k8s/overlays/kpipeline | kubectl delete -f -

undeploy-all-test:
kubectl get jobs -n seldon-mesh --no-headers=true | cut -d' ' -f1 | xargs kubectl delete -n seldon-mesh job
kubectl get jobs -n ${NAMESPACE} --no-headers=true | cut -d' ' -f1 | xargs kubectl delete -n ${NAMESPACE} job

create-secret:
#gcloud iam service-accounts create ${SERVICE_ACCOUNT_NAME} --display-name="SCV2 k6 tests"
#gsutil iam ch serviceAccount:${SERVICE_ACCOUNT_NAME}@${PROJECT_ID}.iam.gserviceaccount.com:objectAdmin gs://${GCS_BUCKET_NAME}/
gcloud iam service-accounts keys create --iam-account "${SERVICE_ACCOUNT_NAME}@${PROJECT_ID}.iam.gserviceaccount.com" k6-service-account.json
kubectl create secret generic k6-sa-key --from-file k6-service-account.json -n seldon-mesh
kubectl create secret generic k6-sa-key --from-file k6-service-account.json -n ${NAMESPACE}

xk6-install:
# Install xk6
go install github.com/grafana/xk6/cmd/xk6@latest
xk6 build --with github.com/grafana/xk6-kubernetes
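For orientation, a minimal sketch of how these targets might be combined to run one of the in-cluster tests in a non-default namespace; the namespace name is illustrative only, and the usual `IMG`/`DOCKERHUB_USERNAME`/`CUSTOM_IMAGE_TAG` overrides still apply:

```bash
# Build and push the k6 image used by the in-cluster job.
make build-push

# Deploy one of the test overlays (kmodel here); the target rewrites both the
# image and the namespace in the kustomize base before applying the overlay.
make deploy-kmodel-test NAMESPACE=perf-testing

# Tear the job(s) down again from the same namespace.
make undeploy-kmodel-test
make undeploy-all-test NAMESPACE=perf-testing
```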
1 change: 1 addition & 0 deletions tests/k6/README.md
@@ -7,6 +7,7 @@
* Install k6 (load driver)
* [link](https://k6.io/docs/getting-started/installation/)
* Alternatively you can build a docker image that includes k6 via: `make docker-build`
* (Optional) Install the kube API extension via `make xk6-install`; remember to run scenarios that need the kube APIs with the locally built `k6` binary in this folder (see the sketch after this list)
* Local testing
* Deploy the system using docker compose via: `make deploy-local`
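A minimal sketch of the optional local flow referenced above, assuming Go is installed and the commands are run from `tests/k6` (the resulting `./k6` binary is the git-ignored file added to `.gitignore` in this commit):

```bash
# Build a k6 binary that bundles the xk6-kubernetes extension.
make xk6-install

# Run the Kubernetes scenario with the locally built binary; NAMESPACE falls
# back to "seldon-mesh" (see components/settings.js) when unset.
NAMESPACE=seldon-mesh ./k6 run scenarios/k8s-test-script.js
```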

8 changes: 8 additions & 0 deletions tests/k6/components/settings.js
@@ -195,6 +195,13 @@ function constantRateDurationSeconds() {
return 30
}

function podNamespace() {
    if (__ENV.NAMESPACE) {
        return __ENV.NAMESPACE
    }
    return "seldon-mesh"
}

export function getConfig() {
return {
"schedulerEndpoint": schedulerEndpoint(),
@@ -225,5 +232,6 @@ export function getConfig() {
"requestRate": requestRate(),
"constantRateDurationSeconds": constantRateDurationSeconds(),
"modelReplicas": modelReplicas(),
"namespace": podNamespace(),
}
}
21 changes: 17 additions & 4 deletions tests/k6/configs/k8s/base/k6.yaml
@@ -10,8 +10,9 @@ spec:
containers:
- name: k6
image: k6:latest
imagePullPolicy: Always
# infer_constant_vu
imagePullPolicy: IfNotPresent
# # choose from the following scenarios:
# # infer_constant_vu
args: [
"--no-teardown",
"--summary-export",
@@ -26,7 +27,7 @@ spec:
"120m",
"scenarios/infer_constant_vu.js",
]
# infer_constant_vu
# # infer_constant_vu
# args: [
# "--no-teardown",
# "--summary-export",
@@ -35,6 +36,14 @@ spec:
# "csv=results/base.gz",
# "scenarios/infer_constant_rate.js",
# ]
# # k8s-test-script
# args: [
# "--summary-export",
# "results/base.json",
# "--out",
# "csv=results/base.gz",
# "scenarios/k8s-test-script.js",
# ]
env:
- name: SCHEDULER_ENDPOINT
value: "${SCHEDULER_ENDPOINT}:9004"
@@ -59,6 +68,10 @@ spec:
value: "/var/run/secret/cloud.google.com/k6-service-account.json"
- name: GS_BUCKET_NAME
value: "gs://seldon-tmp/scv2-k6-results"
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: "service-account"
mountPath: "/var/run/secret/cloud.google.com"
@@ -76,4 +89,4 @@ spec:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
serviceAccountName: seldon-v2-controller-manager
3 changes: 2 additions & 1 deletion tests/k6/k6wrapper.sh
@@ -12,7 +12,8 @@ if [ -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS
fi
echo "start:"$(date) > $DIR/$METADATA
k6 $@
# use local k6 that is built with xk6 to include kube apis
./k6 $@
echo "end:"$(date) >> $DIR/$METADATA
echo "args:"$@ >> $DIR/$METADATA
echo "envs:"$(printenv) >> $DIR/$METADATA
50 changes: 50 additions & 0 deletions tests/k6/scenarios/k8s-test-script.js
@@ -0,0 +1,50 @@
// adapted from https://github.com/grafana/xk6-kubernetes/blob/main/examples/deployment_operations.js
// note that xk6 needs to be installed to run this script
import { Kubernetes } from "k6/x/kubernetes";
import { describe, expect } from "https://jslib.k6.io/k6chaijs/4.3.4.3/index.js";
import { load, dump } from "https://cdn.jsdelivr.net/npm/[email protected]/dist/js-yaml.mjs";
import { getConfig } from '../components/settings.js';

let yaml = `
apiVersion: mlops.seldon.io/v1alpha1
kind: Model
metadata:
  name: tfsimple1
  namespace: ${getConfig().namespace}
spec:
  storageUri: "gs://seldon-models/triton/simple"
  requirements:
  - tensorflow
  memory: 100Ki
`

export default function () {
    const kubernetes = new Kubernetes();

    describe('YAML-based resources', () => {
        let yamlObject = load(yaml)
        const name = yamlObject.metadata.name
        const ns = yamlObject.metadata.namespace

        describe('Create our Model using the YAML definition', () => {
            kubernetes.apply(yaml)
            let created = kubernetes.get("Model.mlops.seldon.io", name, ns)
            expect(created.metadata, 'new Model').to.have.property('uid')
        })

        describe('Update our Model with a modified YAML definition', () => {
            const newValue = 2
            yamlObject.spec.replicas = newValue
            let newYaml = dump(yamlObject)

            kubernetes.apply(newYaml)
            let updated = kubernetes.get("Model.mlops.seldon.io", name, ns)
            expect(updated.spec.replicas, 'changed value').to.be.equal(newValue)
        })

        describe('Remove our Model to cleanup', () => {
            kubernetes.delete("Model.mlops.seldon.io", name, ns)
        })
    })
}
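For readers without xk6 to hand, the scenario's apply/get/delete calls correspond roughly to the kubectl commands below; this is a sketch only, with the manifest path and namespace assumed rather than taken from the script:

```bash
# Apply a Model manifest equivalent to the one the scenario renders above.
kubectl apply -n seldon-mesh -f /tmp/tfsimple1-model.yaml

# The "create" check: a successfully created object carries a UID.
kubectl get models.mlops.seldon.io tfsimple1 -n seldon-mesh -o jsonpath='{.metadata.uid}'

# Cleanup, mirroring kubernetes.delete(...) at the end of the scenario.
kubectl delete models.mlops.seldon.io tfsimple1 -n seldon-mesh
```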
