From f71e53971699f08cbdb58749a44029b4d2113d37 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 10 May 2019 16:40:36 -0700 Subject: [PATCH] List of Changes 1. Add kubectl apply command after setting labels 2. Integration tests for helms are back a. #1823: Use env template in skaffold release name 3. During helm deploy, build is assumed and hence if no build-artifacts are supplied, it fails with following error "no build present for gcr.io/k8s-skaffold/skaffold-helm" Since Build and Deploy are now separate ( #922 for notes) use the image value as is if not present as build artifacts Fix test for this. TODO: 1. Add PR description 2. Figure out why we get 2 pods, 1 with valid labels which is running but another 1 with different labels. --- examples/helm-deployment/skaffold.yaml | 2 +- .../examples/helm-deployment/skaffold.yaml | 2 +- integration/helm_test.go | 83 +++++++++++++++++++ integration/run_test.go | 5 -- integration/util.go | 10 +++ pkg/skaffold/deploy/helm.go | 35 +++++--- pkg/skaffold/deploy/helm_test.go | 3 +- 7 files changed, 119 insertions(+), 21 deletions(-) create mode 100644 integration/helm_test.go diff --git a/examples/helm-deployment/skaffold.yaml b/examples/helm-deployment/skaffold.yaml index 62371e20026..64118271004 100644 --- a/examples/helm-deployment/skaffold.yaml +++ b/examples/helm-deployment/skaffold.yaml @@ -8,7 +8,7 @@ build: deploy: helm: releases: - - name: skaffold-helm + - name: skaffold-helm-{{.TEST_NS}} chartPath: skaffold-helm #wait: true #valuesFiles: diff --git a/integration/examples/helm-deployment/skaffold.yaml b/integration/examples/helm-deployment/skaffold.yaml index 62371e20026..64118271004 100644 --- a/integration/examples/helm-deployment/skaffold.yaml +++ b/integration/examples/helm-deployment/skaffold.yaml @@ -8,7 +8,7 @@ build: deploy: helm: releases: - - name: skaffold-helm + - name: skaffold-helm-{{.TEST_NS}} chartPath: skaffold-helm #wait: true #valuesFiles: diff --git a/integration/helm_test.go 
b/integration/helm_test.go new file mode 100644 index 00000000000..ff78936519a --- /dev/null +++ b/integration/helm_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "fmt" + "os" + "testing" + + "github.com/GoogleContainerTools/skaffold/integration/skaffold" + "github.com/google/go-cmp/cmp" +) + +func TestHelmDeploy(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + if os.Getenv("REMOTE_INTEGRATION") != "true" { + t.Skip("skipping remote only test") + } + + ns, client, deleteNs := SetupNamespace(t) + defer deleteNs() + + // To fix #1823, we make use of templating release name + env := []string{fmt.Sprintf("TEST_NS=%s", ns.Name)} + depName := fmt.Sprintf("skaffold-helm-%s", ns.Name) + helmDir := "examples/helm-deployment" + skaffold.Deploy().InDir(helmDir).InNs(ns.Name).WithEnv(env).RunOrFailOutput(t) + + client.WaitForDeploymentsToStabilize(depName) + + expectedLabels := map[string]string{ + "app.kubernetes.io/managed-by": "true", + "release": depName, + "skaffold.dev/deployer": "helm", + } + // check if labels are set correctly for deployment + dep := client.GetDeployment(depName) + extractLabels(t, fmt.Sprintf("Dep: %s", depName), expectedLabels, dep.ObjectMeta.Labels) + + // check if labels are set correctly for pods + pods := client.GetPods() + // FIX ME the first pod dies. 
+ fmt.Println(pods.Items[1]) + po := pods.Items[0] + extractLabels(t, fmt.Sprintf("Pod:%s", po.ObjectMeta.Name), expectedLabels, po.ObjectMeta.Labels) + + skaffold.Delete().InDir(helmDir).InNs(ns.Name).WithEnv(env).RunOrFail(t) +} + +func extractLabels(t *testing.T, name string, expected map[string]string, actual map[string]string) { + extracted := map[string]string{} + for k, v := range actual { + switch k { + case "app.kubernetes.io/managed-by": + extracted[k] = "true" // ignore version since it's runtime + case "release": + extracted[k] = v + case "skaffold.dev/deployer": + extracted[k] = v + default: + continue + } + } + if d := cmp.Diff(extracted, expected); d != "" { + t.Errorf("expected to see %s labels for %s. Diff %s", expected, name, d) + } +} diff --git a/integration/run_test.go b/integration/run_test.go index 73ab8022e98..97e18619793 100644 --- a/integration/run_test.go +++ b/integration/run_test.go @@ -94,11 +94,6 @@ func TestRun(t *testing.T) { dir: "testdata/kaniko-microservices", deployments: []string{"leeroy-app", "leeroy-web"}, remoteOnly: true, - // }, { - // description: "helm", - // dir: "examples/helm-deployment", - // deployments: []string{"skaffold-helm"}, - // remoteOnly: true, }, { description: "jib in googlecloudbuild", dir: "testdata/jib", diff --git a/integration/util.go b/integration/util.go index a7b9e4469d7..a6317555840 100644 --- a/integration/util.go +++ b/integration/util.go @@ -130,6 +130,16 @@ func (k *NSKubernetesClient) GetDeployment(depName string) *appsv1.Deployment { return dep } +// GetPods gets all pods in the client namespace +func (k *NSKubernetesClient) GetPods() *v1.PodList { + pods, err := k.client.CoreV1().Pods(k.ns).List(meta_v1.ListOptions{}) + if err != nil { + k.t.Fatalf("Could not find pods in namespace %s", k.ns) + } + return pods +} + + // WaitForDeploymentsToStabilize waits for a list of deployments to become stable. 
func (k *NSKubernetesClient) WaitForDeploymentsToStabilize(depNames ...string) { if len(depNames) == 0 { diff --git a/pkg/skaffold/deploy/helm.go b/pkg/skaffold/deploy/helm.go index 740227a6af9..f9d9599984e 100644 --- a/pkg/skaffold/deploy/helm.go +++ b/pkg/skaffold/deploy/helm.go @@ -49,6 +49,7 @@ type HelmDeployer struct { kubeContext string namespace string defaultRepo string + kubectl kubectl.CLI forceDeploy bool } @@ -58,6 +59,10 @@ func NewHelmDeployer(runCtx *runcontext.RunContext) *HelmDeployer { return &HelmDeployer{ HelmDeploy: runCtx.Cfg.Deploy.HelmDeploy, kubeContext: runCtx.KubeContext, + kubectl: kubectl.CLI{ + Namespace: runCtx.Opts.Namespace, + KubeContext: runCtx.KubeContext, + }, namespace: runCtx.Opts.Namespace, defaultRepo: runCtx.DefaultRepo, forceDeploy: runCtx.Opts.ForceDeploy(), @@ -88,6 +93,13 @@ func (h *HelmDeployer) Deploy(ctx context.Context, out io.Writer, builds []build event.DeployFailed(err) return errors.Wrap(err, "setting labels in manifests") } + + err = h.kubectl.Apply(ctx, out, manifests) + if err != nil { + event.DeployFailed(err) + return errors.Wrap(err, "kubectl error while applying labels in manifests") + } + event.DeployComplete() return nil } @@ -171,11 +183,9 @@ func (h *HelmDeployer) deployRelease(ctx context.Context, out io.Writer, r lates color.Red.Fprintf(out, "Helm release %s not installed. 
Installing...\n", releaseName) isInstalled = false } - params, err := h.joinTagsToBuildResult(builds, r.Values) - if err != nil { - return "", errors.Wrap(err, "matching build results to chart values") - } + params := h.joinTagsToBuildResult(builds, r.Values) + logrus.Warn(params) var setOpts []string for k, v := range params { setOpts = append(setOpts, "--set") @@ -409,22 +419,23 @@ func (h *HelmDeployer) deleteRelease(ctx context.Context, out io.Writer, r lates return nil } -func (h *HelmDeployer) joinTagsToBuildResult(builds []build.Artifact, params map[string]string) (map[string]build.Artifact, error) { +func (h *HelmDeployer) joinTagsToBuildResult(builds []build.Artifact, params map[string]string) map[string]build.Artifact { imageToBuildResult := map[string]build.Artifact{} - for _, build := range builds { - imageToBuildResult[build.ImageName] = build + for _, b := range builds { + imageToBuildResult[b.ImageName] = b } - paramToBuildResult := map[string]build.Artifact{} for param, imageName := range params { newImageName := util.SubstituteDefaultRepoIntoImage(h.defaultRepo, imageName) - build, ok := imageToBuildResult[newImageName] + b, ok := imageToBuildResult[newImageName] if !ok { - return nil, fmt.Errorf("no build present for %s", imageName) + logrus.Warnf("no build present for %s", imageName) + logrus.Warnf("continuing with %s", imageName) + b = build.Artifact{ImageName: imageName, Tag: imageName} } - paramToBuildResult[param] = build + paramToBuildResult[param] = b } - return paramToBuildResult, nil + return paramToBuildResult } func evaluateReleaseName(nameTemplate string) (string, error) { diff --git a/pkg/skaffold/deploy/helm_test.go b/pkg/skaffold/deploy/helm_test.go index 202fde461ff..ec05eb5e4f7 100644 --- a/pkg/skaffold/deploy/helm_test.go +++ b/pkg/skaffold/deploy/helm_test.go @@ -242,11 +242,10 @@ func TestHelmDeploy(t *testing.T) { builds: testBuilds, }, { - description: "deploy error unmatched parameter", + description: "deploy should not 
error for unmatched parameter", cmd: &MockHelm{t: t}, runContext: makeRunContext(testDeployConfigParameterUnmatched, false), builds: testBuilds, - shouldErr: true, }, { description: "deploy success remote chart with skipBuildDependencies",