Add initial clusterclass integration test
1 parent f596cbd · commit 5bdb963
Showing 3 changed files with 492 additions and 7 deletions.
@@ -0,0 +1,328 @@
package topology

import (
	"context"
	"log"
	"testing"
	"time"

	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilfeature "k8s.io/component-base/featuregate/testing"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/topology/internal/contract"
	"sigs.k8s.io/cluster-api/controllers/topology/internal/scope"
	"sigs.k8s.io/cluster-api/feature"
	"sigs.k8s.io/cluster-api/internal/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestClusterReconciler_reconcile(t *testing.T) {
	g := NewWithT(t)
	timeout := 5 * time.Second
	ns, err := env.CreateNamespace(ctx, "test-machine-watches")
	g.Expect(err).ToNot(HaveOccurred())
	clusterName := "cluster1"
	workerClassName := "linux-worker"
	infrastructureMachineTemplate := builder.InfrastructureMachineTemplate(ns.Name, "inframachinetemplate").Build()
	infrastructureCluster := builder.InfrastructureClusterTemplate(ns.Name, "infraclustertemplate").
		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
		Build()
	controlPlane := builder.ControlPlaneTemplate(ns.Name, "cp1").
		WithInfrastructureMachineTemplate(infrastructureMachineTemplate).
		Build()
	bootstrapTemplate := builder.BootstrapTemplate(ns.Name, "bootstraptemplate").Build()
	machineDeploymentTopology := builder.MachineDeploymentTopology(workerClassName).
		WithName("mdm1").
		WithReplicas(3).
		Build()
	machineDeploymentClass := builder.MachineDeploymentClass(ns.Name, "md1").
		WithClass(workerClassName).
		WithInfrastructureTemplate(infrastructureMachineTemplate).
		WithBootstrapTemplate(bootstrapTemplate).
		WithLabels(map[string]string{"foo": "bar"}).
		WithAnnotations(map[string]string{"foo": "bar"}).
		Build()
	clusterClass := builder.ClusterClass(ns.Name, "class1").
		WithInfrastructureClusterTemplate(infrastructureCluster).
		WithControlPlaneTemplate(controlPlane).
		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate).
		WithWorkerMachineDeploymentClasses([]clusterv1.MachineDeploymentClass{*machineDeploymentClass}).
		Build()
	cluster := builder.Cluster(ns.Name, clusterName).
		WithTopology(
			builder.ClusterTopology(clusterClass.Name).
				WithWorkerTopology(machineDeploymentTopology).
				WithVersion("1.22.2").
				WithControlPlaneReplicas(3).
				Build()).
		Build()

	type args struct {
		ctx context.Context
		s   *scope.Scope
	}
	tests := []struct {
		name     string
		args     args
		initObjs []client.Object
		want     []client.Object
		wantErr  bool
	}{
		{
			name: "getting things up and running",
			initObjs: []client.Object{
				clusterClass,
				cluster,
				infrastructureCluster,
				infrastructureMachineTemplate,
				bootstrapTemplate,
				controlPlane,
			},
			want: []client.Object{
				cluster,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()

			for _, obj := range tt.initObjs {
				err := env.Create(ctx, obj)
				g.Expect(err).ToNot(HaveOccurred())
			}
			actualCluster := &clusterv1.Cluster{}
			// Get Cluster object and check that relevant fields are set as expected.
			g.Eventually(func() bool {
				key := client.ObjectKey{Name: clusterName, Namespace: ns.Name}
				if err := env.Get(ctx, key, actualCluster); err != nil {
					return false
				}
				// Check if relevant managed topology labels are present.
				if _, ok := actualCluster.Labels[clusterv1.ClusterTopologyOwnedLabel]; !ok {
					log.Printf("We failed on labels %v", actualCluster)
					return false
				}
				// Check if InfrastructureRef and ControlPlaneRef are set.
				if actualCluster.Spec.InfrastructureRef == nil || actualCluster.Spec.ControlPlaneRef == nil {
					log.Printf("We failed on not nil %v", actualCluster)
					return false
				}
				// Check if InfrastructureRef is of the expected Kind.
				if actualCluster.Spec.InfrastructureRef.Kind != builder.GenericInfrastructureClusterKind {
					log.Printf("We failed on infraref %v", actualCluster)
					return false
				}
				// Check if ControlPlaneRef is of the expected Kind.
				if actualCluster.Spec.ControlPlaneRef.Kind != builder.GenericControlPlaneKind {
					log.Printf("We failed on controlPlane ref %v", actualCluster)
					return false
				}
				log.Printf("Cluster passed with %v", actualCluster)
				return true
			}, timeout).Should(BeTrue())

			// Check if InfrastructureRef exists and has the correct labels and annotations.
			g.Eventually(func() bool {
				key := client.ObjectKey{Name: actualCluster.Spec.InfrastructureRef.Name, Namespace: ns.Name}
				got := &unstructured.Unstructured{}
				got.SetKind(actualCluster.Spec.InfrastructureRef.Kind)
				got.SetAPIVersion(actualCluster.Spec.InfrastructureRef.APIVersion)
				if err := env.Get(ctx, key, got); err != nil {
					log.Printf("We failed on get %v %v", key, got)
					log.Println(err.Error())
					return false
				}
				if _, ok := got.GetLabels()[clusterv1.ClusterTopologyOwnedLabel]; !ok {
					log.Printf("We failed on owned label %v", got)
					return false
				}
				// Fail if the cluster name label is missing or does not match the Cluster name.
				if v, ok := got.GetLabels()[clusterv1.ClusterLabelName]; !ok || v != clusterName {
					log.Printf("We failed on clustername label %v", got)
					return false
				}
				if _, ok := got.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation]; !ok {
					log.Printf("We failed on template annotation %v", got)
					return false
				}
				log.Printf("InfrastructureClusterRef passed with %v", got)
				return true
			}, timeout).Should(BeTrue())

			// Check if ControlPlaneRef exists and has the correct labels and annotations.
			cp := &unstructured.Unstructured{}
			g.Eventually(func() bool {
				key := client.ObjectKey{Name: actualCluster.Spec.ControlPlaneRef.Name, Namespace: ns.Name}
				cp.SetKind(actualCluster.Spec.ControlPlaneRef.Kind)
				cp.SetAPIVersion(actualCluster.Spec.ControlPlaneRef.APIVersion)
				if err := env.Get(ctx, key, cp); err != nil {
					log.Printf("We failed on get %v %v", key, cp)
					log.Println(err.Error())
					return false
				}
				if _, ok := cp.GetLabels()[clusterv1.ClusterTopologyOwnedLabel]; !ok {
					log.Printf("We failed on owned label %v", cp)
					return false
				}
				// Fail if the cluster name label is missing or does not match the Cluster name.
				if v, ok := cp.GetLabels()[clusterv1.ClusterLabelName]; !ok || v != clusterName {
					log.Printf("We failed on clustername label %v", cp)
					return false
				}
				if _, ok := cp.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation]; !ok {
					log.Printf("We failed on template annotation %v", cp)
					return false
				}
				version, err := contract.ControlPlane().Version().Get(cp)
				if err != nil {
					log.Printf("We failed on getting control plane version %v %v", err, cp)
					return false
				}
				if *version != cluster.Spec.Topology.Version {
					log.Printf("We failed on comparing control plane version %v", cp)
					return false
				}
				// Check the control plane replicas if they are set in Cluster.Spec.Topology.
				if cluster.Spec.Topology.ControlPlane.Replicas != nil {
					replicas, err := contract.ControlPlane().Replicas().Get(cp)
					if err != nil {
						log.Printf("We failed on getting control plane replicas %v", cp)
						return false
					}
					if int32(*replicas) != *cluster.Spec.Topology.ControlPlane.Replicas {
						log.Printf("We failed on comparing control plane replicas %v", cp)
						return false
					}
				}
				log.Printf("ControlPlane passed with %v", cp)
				return true
			}, timeout).Should(BeTrue())

			// If the clusterClass defines an underlying InfrastructureMachineTemplate for the control plane,
			// check that the cloned InfrastructureMachineTemplate object exists and has the correct GVK,
			// labels, and annotations.
			if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil && clusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil {
				g.Eventually(func() bool {
					cpInfra, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(cp)
					if err != nil {
						log.Printf("We failed on getting control plane infrastructure %v, %v", err, cp)
						return false
					}
					if cpInfra.Kind != builder.GenericInfrastructureMachineTemplateKind {
						log.Printf("Kind wrong for InfrastructureMachineTemplate %v", cp)
						return false
					}
					key := client.ObjectKey{Name: cpInfra.Name, Namespace: ns.Name}
					got := &unstructured.Unstructured{}
					got.SetKind(builder.GenericInfrastructureMachineTemplateKind)
					got.SetAPIVersion(builder.InfrastructureGroupVersion.String())
					if err := env.Get(ctx, key, got); err != nil {
						log.Printf("We failed on get %v %v", key, got)
						log.Println(err.Error())
						return false
					}
					if _, ok := got.GetLabels()[clusterv1.ClusterTopologyOwnedLabel]; !ok {
						log.Printf("We failed on owned label %v", got)
						return false
					}
					// Fail if the cluster name label is missing or does not match the Cluster name.
					if v, ok := got.GetLabels()[clusterv1.ClusterLabelName]; !ok || v != clusterName {
						log.Printf("We failed on clustername label %v", got)
						return false
					}
					if _, ok := got.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation]; !ok {
						log.Printf("We failed on template annotation %v", got)
						return false
					}
					log.Printf("ControlPlaneInfrastructure passed with %v", got)
					return true
				}, timeout).Should(BeTrue())
			}

			g.Eventually(func() bool {
				machineDeployments := &clusterv1.MachineDeploymentList{}
				err := env.List(ctx, machineDeployments)
				if err != nil {
					log.Printf("machine deployments could not be listed %v", err)
					return false
				}
				clusterMDs := make([]clusterv1.MachineDeployment, 0)
				// Run through all machine deployments and add only those with the ClusterTopologyOwnedLabel
				// and the correct ClusterLabelName to the items for further testing.
				for _, md := range machineDeployments.Items {
					if _, ok := md.GetLabels()[clusterv1.ClusterTopologyOwnedLabel]; !ok {
						continue
					}
					if v, ok := md.GetLabels()[clusterv1.ClusterLabelName]; !ok || v != clusterName {
						continue
					}
					clusterMDs = append(clusterMDs, md)
				}
				// If the total number of machine deployments is not as expected return false.
				if len(clusterMDs) != len(clusterClass.Spec.Workers.MachineDeployments) {
					return false
				}
				for _, md := range clusterMDs {
					// This can be used to pick out specific MachineDeployments for testing.
					if md.GetLabels()[clusterv1.ClusterTopologyMachineDeploymentLabelName] != cluster.Spec.Topology.Workers.MachineDeployments[0].Name {
						log.Printf("We failed on deployment label name %v", md)
						return false
					}
					if _, ok := md.GetLabels()[clusterv1.ClusterTopologyOwnedLabel]; !ok {
						log.Printf("We failed on owned label %v", md)
						return false
					}
					// Fail if the cluster name label is missing or does not match the Cluster name.
					if v, ok := md.GetLabels()[clusterv1.ClusterLabelName]; !ok || v != clusterName {
						log.Printf("We failed on clustername label %v", md)
						return false
					}
					if _, ok := md.GetLabels()[clusterv1.ClusterTopologyMachineDeploymentLabelName]; !ok {
						log.Printf("We failed on deployment label name %v", md)
						return false
					}
					// Assume there's only one MachineDeployment created so this can be matched naively.
					if *md.Spec.Replicas != *cluster.Spec.Topology.Workers.MachineDeployments[0].Replicas {
						log.Printf("We failed on deployment replicas %v %v", md.Spec.Replicas, cluster.Spec.Topology.Workers.MachineDeployments[0].Replicas)
						return false
					}
					if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version {
						log.Printf("We failed on deployment version %v", md)
						return false
					}
					// Test here to ensure that labels and annotations are being properly propagated
					// (currently they are only logged).
					log.Printf("%v", md.Spec.Template.Annotations)
					log.Printf("%v", md.Spec.Template.Labels)

					// Check if the infrastructureRef exists with the expected Kind and APIVersion.
					if md.Spec.Template.Spec.InfrastructureRef.Kind != builder.GenericInfrastructureMachineKind &&
						md.Spec.Template.Spec.InfrastructureRef.APIVersion != builder.InfrastructureGroupVersion.String() {
						log.Printf("We failed on infrastructure reference validation %v", md)
						return false
					}
					// Check if the bootstrap configRef exists with the expected Kind and APIVersion.
					if md.Spec.Template.Spec.Bootstrap.ConfigRef.Kind != builder.GenericBootstrapConfigKind &&
						md.Spec.Template.Spec.Bootstrap.ConfigRef.APIVersion != builder.BootstrapGroupVersion.String() {
						log.Printf("We failed on bootstrap validation %v", md)
						return false
					}
				}
				return true
			}, timeout).Should(BeTrue())

		})
	}
}