Skip to content

Commit

Permalink
💚 cluster should have healthy time synchronization
Browse files Browse the repository at this point in the history
  • Loading branch information
mboersma committed Oct 14, 2020
1 parent 3da52a3 commit a1276e0
Show file tree
Hide file tree
Showing 5 changed files with 146 additions and 2 deletions.
30 changes: 30 additions & 0 deletions test/e2e/azure_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,16 @@ var _ = Describe("Workload cluster creation", func() {
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
})
cluster = result.Cluster

Context("Validating time synchronization", func() {
AzureTimeSyncSpec(ctx, func() AzureTimeSyncSpecInput {
return AzureTimeSyncSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
})
})
})

Expand All @@ -121,6 +131,16 @@ var _ = Describe("Workload cluster creation", func() {
})
cluster = result.Cluster

Context("Validating time synchronization", func() {
AzureTimeSyncSpec(ctx, func() AzureTimeSyncSpecInput {
return AzureTimeSyncSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
})

Context("Validating failure domains", func() {
AzureFailureDomainsSpec(ctx, func() AzureFailureDomainsSpecInput {
return AzureFailureDomainsSpecInput{
Expand Down Expand Up @@ -186,6 +206,16 @@ var _ = Describe("Workload cluster creation", func() {
})
cluster = result.Cluster

Context("Validating time synchronization", func() {
AzureTimeSyncSpec(ctx, func() AzureTimeSyncSpecInput {
return AzureTimeSyncSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
})

Context("Creating an accessible ipv6 load balancer", func() {
AzureLBSpec(ctx, func() AzureLBSpecInput {
return AzureLBSpecInput{
Expand Down
92 changes: 92 additions & 0 deletions test/e2e/azure_timesync.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
// +build e2e

/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"
"strings"

. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/util"
kinderrors "sigs.k8s.io/kind/pkg/errors"
)

// AzureTimeSyncSpecInput is the input for AzureTimeSyncSpec.
type AzureTimeSyncSpecInput struct {
	// BootstrapClusterProxy provides a client to the management cluster; must not be nil.
	BootstrapClusterProxy framework.ClusterProxy
	// Namespace is the namespace containing the workload cluster's Machine objects.
	Namespace *corev1.Namespace
	// ClusterName names the workload cluster whose machines are checked.
	ClusterName string
}

// AzureTimeSyncSpec implements a test that verifies time synchronization is healthy for
// the nodes in a cluster: chronyd must be active and tracking an upstream source on
// every machine belonging to the cluster named in the input.
func AzureTimeSyncSpec(ctx context.Context, inputGetter func() AzureTimeSyncSpecInput) {
	var (
		specName = "azure-timesync"
		input    AzureTimeSyncSpecInput
	)

	input = inputGetter()
	Expect(input.BootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
	// Validate the remaining inputs up front: a nil Namespace would otherwise
	// panic on the dereference below instead of failing with a clear message.
	Expect(input.Namespace).NotTo(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName)
	Expect(input.ClusterName).NotTo(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling %s spec", specName)

	namespace, name := input.Namespace.Name, input.ClusterName
	managementClusterClient := input.BootstrapClusterProxy.GetClient()
	machines, err := getMachinesInCluster(ctx, managementClusterClient, namespace, name)
	Expect(err).NotTo(HaveOccurred(), "Failed to get machines for the %s/%s cluster", namespace, name)

	// Collect two checks per machine; they all run concurrently at the end.
	testfuncs := make([]func() error, 0, 2*len(machines.Items))
	for i := range machines.Items {
		m := &machines.Items[i]
		Byf("checking that time synchronization is healthy on %s", m.Name)
		cluster, err := util.GetClusterFromMetadata(ctx, managementClusterClient, m.ObjectMeta)
		Expect(err).NotTo(HaveOccurred())
		controlPlaneEndpoint := cluster.Spec.ControlPlaneEndpoint.Host
		hostname := m.Spec.InfrastructureRef.Name

		// execToStringFn builds a deferred check that runs command on the node's
		// host (via SSH proxied through a control plane node, see execOnHost) and
		// errors unless the output contains expected. It returns errors instead
		// of asserting so failures can be aggregated across all machines.
		execToStringFn := func(expected, command string, args ...string) func() error {
			return func() error {
				f := &strings.Builder{}
				if err := execOnHost(controlPlaneEndpoint, hostname, f, command, args...); err != nil {
					return err
				}
				if !strings.Contains(f.String(), expected) {
					return fmt.Errorf("expected \"%s\" in command output:\n%s", expected, f.String())
				}
				return nil
			}
		}

		testfuncs = append(testfuncs,
			// The chronyd service must be running...
			execToStringFn(
				"active",
				"systemctl", "is-active", "chronyd",
			),
			// ...and chrony must be tracking an upstream time reference.
			execToStringFn(
				"Reference ID",
				"chronyc", "tracking",
			),
		)
	}

	// Run every check concurrently; any single failure fails the spec.
	Expect(kinderrors.AggregateConcurrent(testfuncs)).To(Succeed())
}
2 changes: 2 additions & 0 deletions test/e2e/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ const (
AzureVNetName = "AZURE_VNET_NAME"
CNIPathIPv6 = "CNI_IPV6"
CNIResourcesIPv6 = "CNI_RESOURCES_IPV6"
VMSSHPort = "VM_SSH_PORT"
VMSSSSHPort = "VMSS_SSH_PORT"
)

func Byf(format string, a ...interface{}) {
Expand Down
4 changes: 3 additions & 1 deletion test/e2e/config/azure-dev.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ providers:
- sourcePath: "${PWD}/templates/test/cluster-template-prow-ipv6.yaml"
targetName: "cluster-template-ipv6.yaml"
- sourcePath: "../data/infrastructure-azure/cluster-template-mhc.yaml"
targetName: "cluster-template-mhc.yaml"
targetName: "cluster-template-mhc.yaml"

variables:
KUBERNETES_VERSION: "${KUBERNETES_VERSION:-v1.18.8}"
Expand All @@ -65,6 +65,8 @@ variables:
EXP_AKS: "true"
EXP_MACHINE_POOL: "true"
EXP_CLUSTER_RESOURCE_SET: "true"
VM_SSH_PORT: "22"
VMSS_SSH_PORT: "50001"

intervals:
default/wait-controllers: ["3m", "10s"]
Expand Down
20 changes: 19 additions & 1 deletion test/e2e/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ import (
typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/controller-runtime/pkg/client"
)
Expand Down Expand Up @@ -284,6 +285,23 @@ func logCheckpoint(specTimes map[string]time.Time) {
}
}

// getMachinesInCluster returns the Machine objects belonging to the named
// cluster in the given namespace, selected by the well-known cluster-name label.
// This is copied from CAPI's test/framework/cluster_proxy.go.
func getMachinesInCluster(ctx context.Context, c framework.Lister, namespace, name string) (*clusterv1.MachineList, error) {
	machineList := &clusterv1.MachineList{}

	// An empty cluster name cannot match any machines; return an empty list
	// (not nil) so callers can range over Items without a nil check.
	if name == "" {
		return machineList, nil
	}

	labels := map[string]string{clusterv1.ClusterLabelName: name}
	if err := c.List(ctx, machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
		return nil, err
	}

	return machineList, nil
}

// execOnHost runs the specified command directly on a node's host, using an SSH connection
// proxied through a control plane host.
func execOnHost(controlPlaneEndpoint, hostname string, f io.StringWriter, command string,
Expand All @@ -292,7 +310,7 @@ func execOnHost(controlPlaneEndpoint, hostname string, f io.StringWriter, comman
if err != nil {
return err
}
port := "22" // Need to use port 50001 for VMSS when MachinePools are supported here.
port := e2eConfig.GetVariable(VMSSHPort) // Or VMSSSSHPort when MachinePools are supported

// Init a client connection to a control plane node via the public load balancer
lbClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", controlPlaneEndpoint, port), config)
Expand Down

0 comments on commit a1276e0

Please sign in to comment.