E2E: verify node roles and node machine matching
- Deploy the nodelink-controller along with the cluster API stack.
- Recreate the cluster API stack in a random namespace before each spec is run.
- Testing node roles and machine-node matching requires a new cluster to be deployed via the cluster API, so the locally built AWS actuator image needs to be uploaded to the newly provisioned master node.
- Put semantically related operations into methods/functions so they can be reused in specs.
- Scale deployments/statefulsets down to zero before deleting them. It can take some time before a deployment/statefulset is actually deleted; a new cluster API stack can still be deployed while objects from a previous run exist, as long as the previously created pods are scaled to zero replicas.
- Some of the code is still AWS actuator specific; left for improvement in upcoming PR(s).
1 parent ceb836b, commit b16dbee
Showing 10 changed files with 1,448 additions and 323 deletions.
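The helpers added in this commit are meant to be driven from Ginkgo specs. As a rough, hypothetical sketch of that flow (the suite wiring, the initialization of the framework variable, and the test/framework import path are assumptions, not part of this diff), a spec could recreate the stack in a random namespace around each run like this:

```go
package e2e

import (
    "fmt"
    "math/rand"

    . "github.com/onsi/ginkgo"

    "sigs.k8s.io/cluster-api-provider-aws/test/framework"
)

// f is assumed to be initialized elsewhere in the suite (e.g. in a BeforeSuite);
// its constructor is not shown in this commit.
var f *framework.Framework

var _ = Describe("Cluster API stack", func() {
    var namespace string

    BeforeEach(func() {
        // Use a fresh random namespace so scaled-down leftovers from a
        // previous run cannot collide with the new stack.
        namespace = fmt.Sprintf("cluster-api-test-%d", rand.Intn(100000))
        f.DeployClusterAPIStack(namespace)
    })

    AfterEach(func() {
        f.DestroyClusterAPIStack(namespace)
    })

    It("assigns node roles and matches nodes to machines", func() {
        // Spec body elided; it would create a cluster and machines via the
        // cluster API and then verify node roles and node-machine matching.
    })
})
```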
@@ -0,0 +1,179 @@
package framework

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/wait"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/prometheus/common/log"

    "sigs.k8s.io/cluster-api-provider-aws/test/utils"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (f *Framework) DeployClusterAPIStack(clusterAPINamespace string) {
    By("Deploy cluster API stack components")
    certsSecret, apiAPIService, err := utils.ClusterAPIServerAPIServiceObjects(clusterAPINamespace)
    Expect(err).NotTo(HaveOccurred())
    _, err = f.KubeClient.CoreV1().Secrets(certsSecret.Namespace).Create(certsSecret)
    Expect(err).NotTo(HaveOccurred())

    err = wait.Poll(PollInterval, PoolTimeout, func() (bool, error) {
        if _, err := f.KubeClient.CoreV1().Secrets(certsSecret.Namespace).Get(certsSecret.Name, metav1.GetOptions{}); err != nil {
            return false, nil
        }
        return true, nil
    })
    Expect(err).NotTo(HaveOccurred())

    _, err = f.APIRegistrationClient.Apiregistration().APIServices().Create(apiAPIService)
    Expect(err).NotTo(HaveOccurred())

    apiService := utils.ClusterAPIService(clusterAPINamespace)
    _, err = f.KubeClient.CoreV1().Services(apiService.Namespace).Create(apiService)
    Expect(err).NotTo(HaveOccurred())

    clusterAPIDeployment := utils.ClusterAPIDeployment(clusterAPINamespace)
    _, err = f.KubeClient.AppsV1beta2().Deployments(clusterAPIDeployment.Namespace).Create(clusterAPIDeployment)
    Expect(err).NotTo(HaveOccurred())

    clusterAPIControllersDeployment := utils.ClusterAPIControllersDeployment(clusterAPINamespace)
    _, err = f.KubeClient.AppsV1beta2().Deployments(clusterAPIControllersDeployment.Namespace).Create(clusterAPIControllersDeployment)
    Expect(err).NotTo(HaveOccurred())

    clusterAPIRoleBinding := utils.ClusterAPIRoleBinding(clusterAPINamespace)
    _, err = f.KubeClient.RbacV1().RoleBindings(clusterAPIRoleBinding.Namespace).Create(clusterAPIRoleBinding)
    Expect(err).NotTo(HaveOccurred())

    clusterAPIEtcdCluster := utils.ClusterAPIEtcdCluster(clusterAPINamespace)
    _, err = f.KubeClient.AppsV1beta2().StatefulSets(clusterAPIEtcdCluster.Namespace).Create(clusterAPIEtcdCluster)
    Expect(err).NotTo(HaveOccurred())

    etcdService := utils.ClusterAPIEtcdService(clusterAPINamespace)
    _, err = f.KubeClient.CoreV1().Services(etcdService.Namespace).Create(etcdService)
    Expect(err).NotTo(HaveOccurred())

    By("Waiting for cluster API stack to come up")
    err = wait.Poll(PollInterval, PoolClusterAPIDeploymentTimeout, func() (bool, error) {
        if deployment, err := f.KubeClient.AppsV1beta2().Deployments(clusterAPIDeployment.Namespace).Get(clusterAPIDeployment.Name, metav1.GetOptions{}); err == nil {
            // Check all the pods are running
            log.Infof("Waiting for all cluster-api deployment pods to be ready, have %v, expecting 1", deployment.Status.ReadyReplicas)
            if deployment.Status.ReadyReplicas < 1 {
                return false, nil
            }
            return true, nil
        }
        return false, nil
    })
    Expect(err).NotTo(HaveOccurred())

    By("Cluster API stack deployed")
}
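DeployClusterAPIStack polls with PollInterval, PoolTimeout and PoolClusterAPIDeploymentTimeout, none of which are defined in this diff. A minimal sketch of what those constants might look like, with assumed values:

```go
package framework

import "time"

// Assumed values for illustration only; the real constants live elsewhere in
// the framework package and are not part of this commit.
const (
    PollInterval                    = 5 * time.Second
    PoolTimeout                     = 5 * time.Minute
    PoolClusterAPIDeploymentTimeout = 10 * time.Minute
)
```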
func (f *Framework) DestroyClusterAPIStack(clusterAPINamespace string) {
    var orphanDeletepolicy metav1.DeletionPropagation = "Orphan"
    var zero int64 = 0

    By("Deleting etcd service")
    etcdService := utils.ClusterAPIEtcdService(clusterAPINamespace)
    err := WaitUntilDeleted(func() error {
        return f.KubeClient.CoreV1().Services(etcdService.Namespace).Delete(etcdService.Name, &metav1.DeleteOptions{})
    }, func() error {
        _, err := f.KubeClient.CoreV1().Services(etcdService.Namespace).Get(etcdService.Name, metav1.GetOptions{})
        return err
    })
    Expect(err).NotTo(HaveOccurred())

    By("Scaling down etcd cluster")
    clusterAPIEtcdCluster := utils.ClusterAPIEtcdCluster(clusterAPINamespace)
    err = f.ScaleSatefulSetDownToZero(clusterAPIEtcdCluster)
    Expect(err).NotTo(HaveOccurred())

    By("Deleting etcd cluster")
    WaitUntilDeleted(func() error {
        return f.KubeClient.AppsV1beta2().StatefulSets(clusterAPIEtcdCluster.Namespace).Delete(clusterAPIEtcdCluster.Name, &metav1.DeleteOptions{PropagationPolicy: &orphanDeletepolicy, GracePeriodSeconds: &zero})
    }, func() error {
        obj, err := f.KubeClient.AppsV1beta2().StatefulSets(clusterAPIEtcdCluster.Namespace).Get(clusterAPIEtcdCluster.Name, metav1.GetOptions{})
        fmt.Printf("obj: %#v\n", obj)
        return err
    })
    // Ignore the error; the statefulset has 0 replicas and, living in a
    // different namespace, no longer affects future deployments.

    By("Deleting role binding")
    clusterAPIRoleBinding := utils.ClusterAPIRoleBinding(clusterAPINamespace)
    err = WaitUntilDeleted(func() error {
        return f.KubeClient.RbacV1().RoleBindings(clusterAPIRoleBinding.Namespace).Delete(clusterAPIRoleBinding.Name, &metav1.DeleteOptions{})
    }, func() error {
        _, err := f.KubeClient.RbacV1().RoleBindings(clusterAPIRoleBinding.Namespace).Get(clusterAPIRoleBinding.Name, metav1.GetOptions{})
        return err
    })
    Expect(err).NotTo(HaveOccurred())

    clusterAPIControllersDeployment := utils.ClusterAPIControllersDeployment(clusterAPINamespace)
    By("Scaling down controllers deployment")
    err = f.ScaleDeploymentDownToZero(clusterAPIControllersDeployment)
    Expect(err).NotTo(HaveOccurred())

    By("Deleting controllers deployment")
    WaitUntilDeleted(func() error {
        return f.KubeClient.AppsV1beta2().Deployments(clusterAPIControllersDeployment.Namespace).Delete(clusterAPIControllersDeployment.Name, &metav1.DeleteOptions{PropagationPolicy: &orphanDeletepolicy, GracePeriodSeconds: &zero})
    }, func() error {
        _, err := f.KubeClient.AppsV1beta2().Deployments(clusterAPIControllersDeployment.Namespace).Get(clusterAPIControllersDeployment.Name, metav1.GetOptions{})
        return err
    })
    // Ignore the error; the deployment has 0 replicas and, living in a
    // different namespace, no longer affects future deployments.

    clusterAPIDeployment := utils.ClusterAPIDeployment(clusterAPINamespace)
    By("Scaling down apiserver deployment")
    err = f.ScaleDeploymentDownToZero(clusterAPIDeployment)
    Expect(err).NotTo(HaveOccurred())

    By("Deleting apiserver deployment")
    WaitUntilDeleted(func() error {
        return f.KubeClient.AppsV1beta2().Deployments(clusterAPIDeployment.Namespace).Delete(clusterAPIDeployment.Name, &metav1.DeleteOptions{PropagationPolicy: &orphanDeletepolicy, GracePeriodSeconds: &zero})
    }, func() error {
        _, err := f.KubeClient.AppsV1beta2().Deployments(clusterAPIDeployment.Namespace).Get(clusterAPIDeployment.Name, metav1.GetOptions{})
        return err
    })
    // Ignore the error; the deployment has 0 replicas and, living in a
    // different namespace, no longer affects future deployments.

    By("Deleting cluster api service")
    apiService := utils.ClusterAPIService(clusterAPINamespace)
    err = WaitUntilDeleted(func() error {
        return f.KubeClient.CoreV1().Services(apiService.Namespace).Delete(apiService.Name, &metav1.DeleteOptions{})
    }, func() error {
        _, err := f.KubeClient.CoreV1().Services(apiService.Namespace).Get(apiService.Name, metav1.GetOptions{})
        return err
    })
    Expect(err).NotTo(HaveOccurred())

    // Even though the certs are different, only the secret name(space) and apiservice name(space) are actually used
    certsSecret, apiAPIService, err := utils.ClusterAPIServerAPIServiceObjects(clusterAPINamespace)
    Expect(err).NotTo(HaveOccurred())

    By("Deleting cluster api api service")
    err = WaitUntilDeleted(func() error {
        return f.APIRegistrationClient.Apiregistration().APIServices().Delete(apiAPIService.Name, &metav1.DeleteOptions{})
    }, func() error {
        _, err := f.APIRegistrationClient.Apiregistration().APIServices().Get(apiAPIService.Name, metav1.GetOptions{})
        return err
    })
    Expect(err).NotTo(HaveOccurred())

    By("Deleting api server certs")
    err = WaitUntilDeleted(func() error {
        return f.KubeClient.CoreV1().Secrets(certsSecret.Namespace).Delete(certsSecret.Name, &metav1.DeleteOptions{})
    }, func() error {
        _, err := f.KubeClient.CoreV1().Secrets(certsSecret.Namespace).Get(certsSecret.Name, metav1.GetOptions{})
        return err
    })
    Expect(err).NotTo(HaveOccurred())
}
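DestroyClusterAPIStack leans on a WaitUntilDeleted(deleteFnc, getFnc) helper that is not part of this hunk. A minimal sketch, assuming the helper simply issues the delete and then polls the getter until the object is gone (the actual implementation in the repository may differ):

```go
package framework

import (
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/util/wait"
)

// WaitUntilDeleted issues the delete and then polls until the getter reports
// the object as not found. Sketch only; signature inferred from the callers above.
func WaitUntilDeleted(deleteFnc func() error, getFnc func() error) error {
    if err := deleteFnc(); err != nil && !errors.IsNotFound(err) {
        return err
    }
    return wait.Poll(PollInterval, PoolTimeout, func() (bool, error) {
        if err := getFnc(); errors.IsNotFound(err) {
            return true, nil
        }
        return false, nil
    })
}
```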
@@ -0,0 +1,35 @@
package framework

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/prometheus/common/log"
)

func (f *Framework) CreateClusterAndWait(cluster *clusterv1alpha1.Cluster) {
    By(fmt.Sprintf("Creating %q cluster", cluster.Name))
    err := wait.Poll(PollInterval, PoolTimeout, func() (bool, error) {
        _, err := f.CAPIClient.ClusterV1alpha1().Clusters(cluster.Namespace).Create(cluster)
        if err != nil {
            log.Infof("error creating cluster: %v", err)
            return false, nil
        }
        return true, nil
    })
    Expect(err).NotTo(HaveOccurred())

    err = wait.Poll(PollInterval, PoolTimeout, func() (bool, error) {
        _, err := f.CAPIClient.ClusterV1alpha1().Clusters(cluster.Namespace).Get(cluster.Name, metav1.GetOptions{})
        if err != nil {
            return false, nil
        }
        return true, nil
    })
    Expect(err).NotTo(HaveOccurred())
}
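For context, a caller of CreateClusterAndWait inside a spec might look roughly like the fragment below. The field names follow the cluster-api v1alpha1 types imported above as best I recall them; the cluster name and CIDRs are placeholders, and the AWS-specific provider config a real spec would attach is elided.

```go
// Hypothetical fragment from a spec; f and clusterAPINamespace come from the
// surrounding test, and ProviderConfig with the AWS-specific payload is elided.
cluster := &clusterv1alpha1.Cluster{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "example-cluster",
        Namespace: clusterAPINamespace,
    },
    Spec: clusterv1alpha1.ClusterSpec{
        ClusterNetwork: clusterv1alpha1.ClusterNetworkingConfig{
            Services:      clusterv1alpha1.NetworkRanges{CIDRBlocks: []string{"10.0.0.0/24"}},
            Pods:          clusterv1alpha1.NetworkRanges{CIDRBlocks: []string{"192.168.0.0/16"}},
            ServiceDomain: "cluster.local",
        },
    },
}
f.CreateClusterAndWait(cluster)
```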