diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 00000000000..9668b4b10a2
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,4 @@
+[style]
+based_on_style = google
+indent_width: 2
+continuation_indent_width: 2
diff --git a/components/k8s-model-server/images/releaser/components/workflows.libsonnet b/components/k8s-model-server/images/releaser/components/workflows.libsonnet
index 04b63630935..fc495a26d98 100644
--- a/components/k8s-model-server/images/releaser/components/workflows.libsonnet
+++ b/components/k8s-model-server/images/releaser/components/workflows.libsonnet
@@ -134,6 +134,12 @@
             },
           },
         },
+        // We use a directory in our NFS share to store our kube config.
+        // This way we can configure it in a single step and reuse it in subsequent steps.
+        {
+          name: "KUBECONFIG",
+          value: testDir + "/.kube/config",
+        },
       ] + prow_env + env_vars,
       volumeMounts: [
         {
@@ -220,13 +226,17 @@
           name: "checkout",
           template: "checkout",
         },
-
        {
          name: "create-pr-symlink",
          template: "create-pr-symlink",
          dependencies: ["checkout"],
        },
+        {
+          name: "setup",
+          template: "setup",
+          dependencies: ["checkout"],
+        },
        {
          name: "test-tf-serving",
          template: "test-tf-serving",
@@ -253,22 +263,20 @@
        {
          name: "deploy-tf-serving",
          template: "deploy-tf-serving",
-          dependencies: ["build-tf-serving-cpu"],
+          dependencies: ["build-tf-serving-cpu", "setup"],
        },
      ] else [
        {
          name: "deploy-tf-serving",
          template: "deploy-tf-serving",
-          dependencies: ["checkout"],
+          dependencies: ["setup"],
        },
      ];
     local deploy_tf_serving_command_base = [
       "python",
       "-m",
       "testing.test_deploy",
-      "--project=" + project,
-      "--cluster=" + cluster,
-      "--zone=" + zone,
+      "--project=" + project,
       "--github_token=$(GITHUB_TOKEN)",
       // TODO(jlewi): This is duplicative with params. We should probably get
       // rid of this and just treat namespace as another parameter.
@@ -355,6 +363,19 @@
       buildImageTemplate("build-tf-serving-cpu", "Dockerfile.cpu", cpuImage),
+      // Setup configures a kubeconfig file for GKE.
+      buildTemplate("setup", [
+        "python",
+        "-m",
+        "testing.test_deploy",
+        "--project=" + project,
+        "--test_dir=" + testDir,
+        "--artifacts_dir=" + artifactsDir,
+        "get_gke_credentials",
+        "--cluster=" + cluster,
+        "--zone=" + zone,
+      ]),  // setup
       buildTemplate(
         "deploy-tf-serving",
         deploy_tf_serving_command,
@@ -395,9 +416,7 @@
         "-m",
         "testing.test_deploy",
         "--project=" + project,
-        "--cluster=" + cluster,
         "--namespace=" + stepsNamespace,
-        "--zone=" + zone,
         "--test_dir=" + testDir,
         "--artifacts_dir=" + artifactsDir,
         "teardown",
diff --git a/prow_config.yaml b/prow_config.yaml
index 503e89308b4..f3c79aafcb8 100644
--- a/prow_config.yaml
+++ b/prow_config.yaml
@@ -1,9 +1,18 @@
 # This file configures the workflows to trigger in our Prow jobs.
 # see kubeflow/testing/py/run_e2e_workflow.py
 workflows:
+  # Run tests on GKE
   - app_dir: kubeflow/kubeflow/testing/workflows
     component: workflows
-    name: kubeflow-e2e
+    name: kubeflow-e2e-gke
+    params:
+      platform: gke
+  # Run tests on minikube
+  - app_dir: kubeflow/kubeflow/testing/workflows
+    component: workflows
+    name: kubeflow-e2e-minikube
+    params:
+      platform: minikube
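+  # The two entries above run the same ksonnet component; the platform param
+  # selects the GKE or minikube code path inside the workflow.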
   - app_dir: kubeflow/kubeflow/components/k8s-model-server/images/releaser
     component: workflows
     name: tf-serving-image
diff --git a/releasing.md b/releasing.md
index 0ab12a19e54..2cfac96d3a5 100644
--- a/releasing.md
+++ b/releasing.md
@@ -4,10 +4,10 @@ Some preliminary instructions for how to cut a release.
 
 ## Authenticate to GCP
 
-If you're new to using GKE or are new to the release team, you'll need to authenticate to GCP first. [Install the `gcloud` tool](https://cloud.google.com/sdk/gcloud/) and then execute the following commands, substituting your release team account for `your-team-account@kubeflow.org`:
+If you're new to using GKE or are new to the release team, you'll need to authenticate to GCP first. [Install the `gcloud` tool](https://cloud.google.com/sdk/gcloud/) and then execute the following commands, substituting your Kubeflow release team account for `your-account@yourdomain.org` (if you aren't a member of `release-team@kubeflow.org`, ask to be added):
 
 ```
-gcloud config set account your-team-account@kubeflow.org
+gcloud config set account your-account@yourdomain.org
 gcloud auth
 ```
diff --git a/testing/install_minikube.sh b/testing/install_minikube.sh
index 5875a45a9a5..c914be401cc 100644
--- a/testing/install_minikube.sh
+++ b/testing/install_minikube.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# A helper script to run on a VM to install minikube.
+# A helper script to run on a VM to install and start minikube.
 
 set -ex
@@ -32,5 +32,9 @@
 curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.25.0/minikube-linux-amd64
 chmod +x minikube
 sudo mv minikube /usr/local/bin/
+# We need a large disk for JupyterHub.
+sudo minikube start --vm-driver=none --disk-size=40g
+
+# Change the permissions because we will copy these files.
 sudo chmod -R a+rw ~/.kube
 sudo chmod -R a+rw ~/.minikube
diff --git a/testing/test_deploy.py b/testing/test_deploy.py
index 9393eb1ca44..27ebddc93fc 100644
--- a/testing/test_deploy.py
+++ b/testing/test_deploy.py
@@ -41,6 +41,7 @@
 from kubernetes import client as k8s_client
 from kubernetes.client import rest
+from kubernetes.config import kube_config
 from kubernetes.config import incluster_config
 from testing import vm_util
@@ -77,24 +78,9 @@ def _setup_test(api_client, run_label):
   return namespace
 
-def create_k8s_client(args):
-  if args.cluster:
-    project = args.project
-    cluster_name = args.cluster
-    zone = args.zone
-    logging.info("Using cluster: %s in project: %s in zone: %s",
-                 cluster_name, project, zone)
-    # Print out config to help debug issues with accounts and
-    # credentials.
-    util.run(["gcloud", "config", "list"])
-    util.configure_kubectl(project, zone, cluster_name)
-    util.load_kube_config()
-  else:
-    # TODO(jlewi): This is sufficient for API access but it doesn't create
-    # a kubeconfig file which ksonnet needs for ks init.
-    logging.info("Running inside cluster.")
-    incluster_config.load_incluster_config()
-
+def create_k8s_client(args):
+  util.load_kube_config()
+
   # Create an API client object to talk to the K8s master.
   api_client = k8s_client.ApiClient()
 
@@ -154,8 +140,22 @@
   return app_dir
 
-def setup(args):
-  """Test deploying Kubeflow."""
+def get_gke_credentials(args):
+  """Configure kubeconfig to talk to the supplied GKE cluster."""
+  config_file = os.path.expanduser(kube_config.KUBE_CONFIG_DEFAULT_LOCATION)
+  logging.info("Using Kubernetes config file: %s", config_file)
+  project = args.project
+  cluster_name = args.cluster
+  zone = args.zone
+  logging.info("Using cluster: %s in project: %s in zone: %s",
+               cluster_name, project, zone)
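+  # The fetched credentials land in $KUBECONFIG, which the test workflows
+  # point at a directory on the shared NFS volume, so subsequent steps can
+  # reuse them without re-running setup.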
+ util.run(["gcloud", "config", "list"]) + util.configure_kubectl(project, zone, cluster_name) + +def deploy_kubeflow(args): + """Deploy Kubeflow.""" api_client = create_k8s_client(args) app_dir = setup_kubeflow_ks_app(args, api_client) @@ -163,20 +163,48 @@ def setup(args): # TODO(jlewi): We don't need to generate a core component if we are # just deploying TFServing. Might be better to refactor this code. # Deploy Kubeflow +<<<<<<< HEAD util.run(["ks", "generate", "core", "kubeflow-core", "--name=kubeflow-core", "--namespace=" + namespace], cwd=app_dir) - - # TODO(jlewi): For reasons I don't understand even though we ran - # configure_kubectl above, if we don't rerun it we get rbac errors - # when we do ks apply; I think because we aren't using the proper service - # account. This might have something to do with the way ksonnet gets - # its credentials; maybe we need to configure credentials after calling - # ks init? - if args.cluster: - util.configure_kubectl(args.project, args.zone, args.cluster) - +======= + util.run( + [ + "ks", "generate", "core", "kubeflow-core", "--name=kubeflow-core", + "--namespace=" + namespace + ], + cwd=app_dir) + + apply_command = [ + "ks", + "apply", + "default", + "-c", + "kubeflow-core", + ] +>>>>>>> dd576ad... Turn on verbose logging. + +<<<<<<< HEAD apply_command = ["ks", "apply", "default", "-c", "kubeflow-core",] - + +======= + if args.as_gcloud_user: + account = get_gcp_identity() + logging.info("Impersonate %s", account) + +<<<<<<< HEAD + # If we don't use --as to impersonate the service account then we + # observe RBAC errors when doing certain operations. The problem appears + # to be that we end up using the in cluster config (e.g. pod service account) + # and not the GCP service account which has more privileges. + apply_command.append("--as=" + account) +>>>>>>> bd6305b... Fix workflow. +======= + # If we don't use --as to impersonate the service account then we + # observe RBAC errors when doing certain operations. The problem appears + # to be that we end up using the in cluster config (e.g. pod service account) + # and not the GCP service account which has more privileges. + apply_command.append("--as=" + account) +>>>>>>> 9e98e2b... Fix as. util.run(apply_command, cwd=app_dir) # Verify that the TfJob operator is actually deployed. @@ -211,7 +239,6 @@ def deploy_model(args): raise ValueError("namespace must be supplied via --params.") namespace = params["namespace"] - # Set env to none so random env will be created. 
   ks_deploy(app_dir, component, params, env=None, account=None)
 
   core_api = k8s_client.CoreV1Api(api_client)
@@ -297,7 +324,7 @@ def ks_deploy(app_dir, component, params, env=None, account=None):
 
   apply_command = ["ks", "apply", env, "-c", component]
   if account:
-    apply_command.append("--as=" + account)
+    apply_command.append("--as=" + account)
   util.run(apply_command, cwd=app_dir)
 
 def modify_minikube_config(config_path, certs_dir):
@@ -383,7 +410,6 @@ def deploy_minikube(args):
   vm_util.wait_for_vm(args.project, args.zone, args.vm_name)
   vm_util.execute_script(args.project, args.zone, args.vm_name, install_script)
-  vm_util.execute(args.project, args.zone, args.vm_name, ["sudo minikube start --vm-driver=none --disk-size=40g"])
 
   # Copy the .kube and .minikube files to test_dir
   # The .minikube directory contains some really large ISO and other files that we don't need; so we
@@ -408,17 +434,59 @@
 def teardown_minikube(args):
   """Delete the VM used for minikube."""
-
+
   credentials = GoogleCredentials.get_application_default()
   gce = discovery.build("compute", "v1", credentials=credentials)
   instances = gce.instances()
-
+
   request = instances.delete(project=args.project,
                              zone=args.zone,
                              instance=args.vm_name)
-
+
   request.execute()
+
+def maybe_configure_kubectl_for_gcp(config_path):
+  logging.info("Checking if we need to refresh GCP config for kubectl %s",
+               config_path)
+  with open(config_path, "r") as hf:
+    config = yaml.load(hf)
+
+  current_context = config.get("current-context")
+  cluster = ""
+  for context in config["contexts"]:
+    if not current_context == context.get("name"):
+      continue
+
+    cluster = context.get("context", {}).get("cluster", "")
+    break
+
+  if not cluster.startswith("gke_"):
+    logging.info("Cluster %s is not a gke cluster", cluster)
+    return
+
+  pieces = cluster.split("_", 4)
+
+  if not len(pieces) == 4:
+    message = ("Could not split {0} into "
+               "gke_<project>_<zone>_<cluster>").format(cluster)
+    logging.error(message)
+    raise ValueError(message)
+  project = pieces[1]
+  zone = pieces[2]
+  cluster = pieces[3]
+
+  util.configure_kubectl(project, zone, cluster)
+
+def get_gcp_identity():
+  identity = util.run_and_output(["gcloud", "config", "get-value", "account"])
+  logging.info("Current GCP account: %s", identity)
+  return identity
+
-def main(): # pylint: disable=too-many-locals
-  logging.getLogger().setLevel(logging.INFO) # pylint: disable=too-many-locals
+def main():  # pylint: disable=too-many-locals,too-many-statements
+  logging.getLogger().setLevel(logging.INFO)
   # create the top-level parser
   parser = argparse.ArgumentParser(
     description="Test Kubeflow E2E.")
@@ -437,31 +505,20 @@
     help="Directory to use for artifacts that should be preserved after "
          "the test runs. Defaults to test_dir if not set.")
 
+  # TODO(jlewi): This should not be a global flag.
   parser.add_argument(
     "--project",
     default=None,
     type=str,
     help="The project to use.")
 
-  parser.add_argument(
-    "--cluster",
-    default=None,
-    type=str,
-    help=("The name of the cluster. If not set assumes the "
-          "script is running in a cluster and uses that cluster."))
-
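+  # --cluster and --zone now live on the get_gke_credentials and minikube
+  # subcommands below instead of being global flags.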
+  # TODO(jlewi): This should not be a global flag.
   parser.add_argument(
     "--namespace",
-    required=True,
+    default=None,
     type=str,
     help=("The namespace to use."))
 
-  parser.add_argument(
-    "--zone",
-    default="us-east1-d",
-    type=str,
-    help="The zone for the cluster.")
-
   parser.add_argument(
     "--github_token",
     default=None,
@@ -480,11 +537,24 @@
 
   subparsers = parser.add_subparsers()
 
-  parser_setup = subparsers.add_parser(
-    "setup",
-    help="setup the test infrastructure.")
+  parser_gke = subparsers.add_parser(
+    "get_gke_credentials",
+    help="Configure kubectl for a GKE cluster.")
+
+  parser_gke.set_defaults(func=get_gke_credentials)
+
+  parser_gke.add_argument(
+    "--cluster",
+    default=None,
+    type=str,
+    help=("The name of the cluster. If not set assumes the "
+          "script is running in a cluster and uses that cluster."))
 
-  parser_setup.set_defaults(func=setup)
+  parser_gke.add_argument(
+    "--zone",
+    default="us-east1-d",
+    type=str,
+    help="The zone for the cluster.")
 
   parser_teardown = subparsers.add_parser(
     "teardown",
@@ -492,6 +562,12 @@
 
   parser_teardown.set_defaults(func=teardown)
 
+  parser_kubeflow = subparsers.add_parser(
+    "deploy_kubeflow",
+    help="Deploy Kubeflow.")
+
+  parser_kubeflow.set_defaults(func=deploy_kubeflow)
+
   parser_tf_serving = subparsers.add_parser(
     "deploy_model",
     help="Deploy a TF serving model.")
@@ -515,13 +591,13 @@
     required=True,
     type=str,
     help="The name of the VM to use.")
 
   parser_minikube.add_argument(
     "--zone",
-    required=True,
+    default="us-east1-d",
     type=str,
     help="The zone to deploy the VM in.")
 
   parser_teardown_minikube = subparsers.add_parser(
     "teardown_minikube",
     help="Delete the VM running minikube.")
@@ -529,17 +605,16 @@
   parser_teardown_minikube.set_defaults(func=teardown_minikube)
 
   parser_teardown_minikube.add_argument(
-    "--vm_name",
-    required=True,
+    "--zone",
+    default="us-east1-d",
     type=str,
-    help="The name of the VM to use.")
-
+    help="The zone to deploy the VM in.")
+
   parser_teardown_minikube.add_argument(
-    "--zone",
+    "--vm_name",
     required=True,
     type=str,
-    help="The zone to deploy the VM in.")
-
+    help="The name of the VM to use.")
 
   args = parser.parse_args()
 
@@ -578,7 +653,17 @@
   util.run(["ks", "version"])
 
   util.maybe_activate_service_account()
+
+  config_file = os.path.expanduser(kube_config.KUBE_CONFIG_DEFAULT_LOCATION)
+
+  # TODO(jlewi): We should move this into kubeflow/testing
+  if os.path.exists(config_file):
+    maybe_configure_kubectl_for_gcp(config_file)
+  else:
+    logging.info("KUBECONFIG %s doesn't exist; skipping "
+                 "maybe_configure_kubectl_for_gcp.", config_file)
+
   # Print out the config to help debugging.
+  output = util.run_and_output(["gcloud", "config", "config-helper"])
+  logging.info("gcloud config: \n%s", output)
   wrap_test(args)
 
 if __name__ == "__main__":
diff --git a/testing/workflows/app.yaml b/testing/workflows/app.yaml
index 056531459de..594d22f82fa 100644
--- a/testing/workflows/app.yaml
+++ b/testing/workflows/app.yaml
@@ -1,5 +1,11 @@
 apiVersion: 0.1.0
 environments:
+  kubeflow-ci:
+    destination:
+      namespace: kubeflow-test-infra
+      server: https://35.185.54.227
+    k8sVersion: v1.7.0
+    path: kubeflow-ci
   prow:
     destination:
       namespace: kubeflow-testing
diff --git a/testing/workflows/components/params.libsonnet b/testing/workflows/components/params.libsonnet
index 85374aaf5c2..f8bbab7223d 100644
--- a/testing/workflows/components/params.libsonnet
+++ b/testing/workflows/components/params.libsonnet
@@ -8,10 +8,12 @@
     // Each object below should correspond to a component in the components/ directory
     workflows: {
       bucket: "kubeflow-ci_temp",
-      name: "jlewi-kubeflow-kubeflow-presubmit-test-227-643b",
+      mode: "minikube",
+      name: "jlewi-kubeflow-kubeflow-presubmit-test-473-290a",
       namespace: "kubeflow-test-infra",
+      platform: "minikube",
       prow: "JOB_NAME=kubeflow-presubmit-test,JOB_TYPE=presubmit,PULL_NUMBER=209,REPO_NAME=kubeflow,REPO_OWNER=kubeflow,BUILD_NUMBER=997a",
-      prow_env: "JOB_NAME=kubeflow-kubeflow-presubmit-test,JOB_TYPE=presubmit,PULL_NUMBER=227,REPO_NAME=kubeflow,REPO_OWNER=kubeflow,BUILD_NUMBER=643b",
+      prow_env: "JOB_NAME=kubeflow-kubeflow-presubmit-test,JOB_TYPE=presubmit,PULL_NUMBER=473,REPO_NAME=kubeflow,REPO_OWNER=kubeflow,BUILD_NUMBER=290a",
     },
   },
 }
diff --git a/testing/workflows/components/workflows.jsonnet b/testing/workflows/components/workflows.jsonnet
index 9a3a765def7..1c4ad50345f 100644
--- a/testing/workflows/components/workflows.jsonnet
+++ b/testing/workflows/components/workflows.jsonnet
@@ -10,4 +10,4 @@
 local name = params.name;
 local prowEnv = workflows.parseEnv(params.prow_env);
 local bucket = params.bucket;
-std.prune(k.core.v1.list.new([workflows.parts(namespace, name).e2e(prowEnv, bucket)]))
+std.prune(k.core.v1.list.new([workflows.parts(namespace, name).e2e(prowEnv, bucket, params.platform)]))
diff --git a/testing/workflows/components/workflows.libsonnet b/testing/workflows/components/workflows.libsonnet
index ac6e7cec770..58ac65d5fb6 100644
--- a/testing/workflows/components/workflows.libsonnet
+++ b/testing/workflows/components/workflows.libsonnet
@@ -21,7 +21,7 @@
   parts(namespace, name):: {
     // Workflow to run the e2e test.
-    e2e(prow_env, bucket):
+    e2e(prow_env, bucket, platform="minikube"):
       // The name for the workspace to run the steps in
       local stepsNamespace = name;
       // mountPath is the directory where the volume to store the test data
@@ -37,6 +37,7 @@
       // The directory containing the kubeflow/kubeflow repo
       local srcDir = srcRootDir + "/kubeflow/kubeflow";
       local image = "gcr.io/kubeflow-ci/test-worker:latest";
+      // The name of the NFS volume claim to use for test files.
       local nfsVolumeClaim = "nfs-external";
       // The name to use for the volume to use to contain test data.
@@ -48,9 +49,24 @@
       local tfOperatorRoot = srcRootDir + "/kubeflow/tf-operator";
       local tfOperatorPy = tfOperatorRoot;
 
+      // VM to use for minikube.
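+      // GCE instance names must be at most 63 characters and must start with
+      // a lowercase letter (RFC 1035), hence the truncation below.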
+ "z" + std.substr(name, std.length(name) - 60, 60) + else + name + else + ""; local project = "kubeflow-ci"; // GKE cluster to use - local cluster = "kubeflow-testing"; + local cluster = + if platform == "gke" then + "kubeflow-testing" + else + ""; local zone = "us-east1-d"; // Build an Argo template to execute a particular command. // step_name: Name for the template @@ -80,6 +96,12 @@ }, }, }, + // We use a directory in our NFS share to store our kube config. + // This way we can configure it on a single step and reuse it on subsequent steps. + { + name: "KUBECONFIG", + value: testDir + "/.kube/config", + }, ] + prow_env + env_vars, volumeMounts: [ { @@ -104,8 +126,14 @@ metadata: { name: name, namespace: namespace, + labels: { + org: "kubeflow", + repo: "kubeflow", + workflow: "e2e", + // TODO(jlewi): Add labels for PR number and commit. Need to write a function + // to convert list of environment variables to labels. + }, }, - // TODO(jlewi): Use OnExit to run cleanup steps. spec: { entrypoint: "e2e", volumes: [ @@ -128,86 +156,163 @@ }, }, ], // volumes - // onExit specifies the template that should always run when the workflow completes. + // onExit specifies the template that should always run when the workflow completes. onExit: "exit-handler", templates: [ { name: "e2e", - steps: [ - [{ - name: "checkout", - template: "checkout", - }], - [ + dag: { + tasks: [ { - name: "setup", - template: "setup", + name: "checkout", + template: "checkout", }, + + { + local gkeSetup = { + name: "setup-gke", + template: "setup-gke", + dependencies: ["checkout"], + }, + + local minikubeSetup = { + name: "setup-minikube", + template: "setup-minikube", + dependencies: ["checkout"], + }, + + result:: if platform == "minikube" then + minikubeSetup + else + gkeSetup, + + }.result, { name: "create-pr-symlink", template: "create-pr-symlink", + dependencies: ["checkout"], }, - ], - [ + + { + name: "deploy-kubeflow", + template: "deploy-kubeflow", + dependencies: [ + if platform == "gke" then + "setup-gke" + else + if platform == "minikube" then + "setup-minikube" + else + "", + ], + }, + { name: "tfjob-test", template: "tfjob-test", + dependencies: ["deploy-kubeflow"], }, { name: "jsonnet-test", template: "jsonnet-test", + dependencies: ["checkout"], }, - ], - ], - }, + ], // tasks + }, // dag + }, // e2e template { name: "exit-handler", - steps: [ - [ + dag: { + tasks: [ { name: "teardown", - template: "teardown", + template: + if platform == "gke" then + "teardown-gke" + else + if platform == "minikube" then + "teardown-minikube" + else + "", + }, + { + name: "copy-artifacts", + template: "copy-artifacts", + dependencies: ["teardown"], }, ], - [{ - name: "copy-artifacts", - template: "copy-artifacts", - }], - ], - }, + }, // dag + }, // exit-handler buildTemplate( "checkout", ["/usr/local/bin/checkout.sh", srcRootDir], [{ name: "EXTRA_REPOS", - value: "kubeflow/tf-operator@HEAD;kubeflow/testing@HEAD", + value: "kubeflow/tf-operator@HEAD;kubeflow/testing@HEAD:83", }], - [], // no sidecars + [], // no sidecars ), - buildTemplate("setup", [ + // Setup and teardown using GKE. 
+ buildTemplate("setup-gke", [ "python", "-m", "testing.test_deploy", + "--project=" + project, + "--namespace=" + stepsNamespace, + "--test_dir=" + testDir, + "--artifacts_dir=" + artifactsDir, + "get_gke_credentials", "--cluster=" + cluster, "--zone=" + zone, + ]), // setup + buildTemplate("teardown-gke", [ + "python", + "-m", + "testing.test_deploy", "--project=" + project, "--namespace=" + stepsNamespace, "--test_dir=" + testDir, "--artifacts_dir=" + artifactsDir, - "setup", - ]), // setup - buildTemplate("teardown", [ + "teardown", + ]), // teardown + // Setup and teardown using minikube + buildTemplate("setup-minikube", [ "python", "-m", "testing.test_deploy", "--project=" + project, - "--cluster=" + cluster, "--namespace=" + stepsNamespace, + "--test_dir=" + testDir, + "--artifacts_dir=" + artifactsDir, + "deploy_minikube", + "--vm_name=" + vmName, "--zone=" + zone, + ]), // setup + buildTemplate("teardown-minikube", [ + "python", + "-m", + "testing.test_deploy", + "--project=" + project, + "--namespace=" + stepsNamespace, "--test_dir=" + testDir, "--artifacts_dir=" + artifactsDir, - "teardown", + "teardown_minikube", + "--vm_name=" + vmName, + "--zone=" + zone, ]), // teardown + + buildTemplate("deploy-kubeflow", + [ + "python", + "-m", + "testing.test_deploy", + "--project=" + project, + "--namespace=" + stepsNamespace, + "--test_dir=" + testDir, + "--artifacts_dir=" + artifactsDir, + "deploy_kubeflow", + ] + ), // deploy-kubeflow buildTemplate("create-pr-symlink", [ "python", "-m", @@ -231,7 +336,7 @@ "--artifacts_dir=" + artifactsDir, "--test_files_dirs=" + srcDir + "/kubeflow", "--jsonnet_path_dirs=" + srcDir, - ]), // jsonnet-test + ]), // jsonnet-test buildTemplate("tfjob-test", [ "python", "-m", @@ -242,8 +347,10 @@ "--project=" + project, "--app_dir=" + tfOperatorRoot + "/test/workflows", "--component=simple_tfjob", - "--params=name=simple-tfjob,namespace=" + stepsNamespace, - "--junit_path=" + artifactsDir + "/junit_e2e.xml", + // Name is used for the test case name so it should be unique across + // all E2E tests. + "--params=name=simple-tfjob-" + platform + ",namespace=" + stepsNamespace, + "--junit_path=" + artifactsDir + "/junit_e2e_" + platform + ".xml", ]), // run tests ], // templates }, diff --git a/testing/workflows/environments/kubeflow-ci/main.jsonnet b/testing/workflows/environments/kubeflow-ci/main.jsonnet new file mode 100644 index 00000000000..a6363c7472f --- /dev/null +++ b/testing/workflows/environments/kubeflow-ci/main.jsonnet @@ -0,0 +1,7 @@ +local base = import "base.libsonnet"; +local k = import "k.libsonnet"; + +base + { + // Insert user-specified overrides here. For example if a component is named "nginx-deployment", you might have something like: + // "nginx-deployment"+: k.deployment.mixin.metadata.labels({foo: "bar"}) +} diff --git a/testing/workflows/environments/kubeflow-ci/params.libsonnet b/testing/workflows/environments/kubeflow-ci/params.libsonnet new file mode 100644 index 00000000000..9921fb58182 --- /dev/null +++ b/testing/workflows/environments/kubeflow-ci/params.libsonnet @@ -0,0 +1,10 @@ +local params = import "../../components/params.libsonnet"; +params + { + components +: { + // Insert component parameter overrides here. Ex: + // guestbook +: { + // name: "guestbook-dev", + // replicas: params.global.replicas, + // }, + }, +}