Add support for IPAM and Runtime Extension providers in E2E tests
fabriziopandini committed Sep 30, 2022
1 parent c5f5115 commit 6881c75
Showing 11 changed files with 285 additions and 200 deletions.
6 changes: 1 addition & 5 deletions test/e2e/Makefile
@@ -105,10 +105,6 @@ cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/main/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/main/cluster-template-topology.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/main/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/main/cluster-template-ignition.yaml

test-extension-deployment: $(KUSTOMIZE) ## Generate deployment for test extension
mkdir -p $(REPO_ROOT)/test/e2e/data/test-extension
$(KUSTOMIZE) build $(REPO_ROOT)/test/extension/config/default > $(REPO_ROOT)/test/e2e/data/test-extension/deployment.yaml

## --------------------------------------
## Testing
## --------------------------------------
@@ -129,7 +125,7 @@ _SKIP_ARGS := $(foreach arg,$(strip $(GINKGO_SKIP)),-skip="$(arg)")
endif

.PHONY: run
run: $(GINKGO) cluster-templates test-extension-deployment ## Run the end-to-end tests
run: $(GINKGO) cluster-templates ## Run the end-to-end tests
$(GINKGO) -v --trace --tags=e2e --focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) . -- \
-e2e.artifacts-folder="$(ARTIFACTS)" \
-e2e.config="$(E2E_CONF_FILE)" \
171 changes: 67 additions & 104 deletions test/e2e/cluster_upgrade_runtimesdk.go

Large diffs are not rendered by default.

23 changes: 14 additions & 9 deletions test/e2e/clusterctl_upgrade.go
@@ -235,6 +235,9 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
err := os.Chmod(clusterctlBinaryPath, 0744) //nolint:gosec
Expect(err).ToNot(HaveOccurred(), "failed to chmod temporary file")

// Adjust the clusterctlConfigPath in case the clusterctl version is <= v1.3 (which expects a config file listing only the providers supported by those versions)
clusterctlConfigPath := clusterctl.AdjustConfigPathForBinary(clusterctlBinaryPath, input.ClusterctlConfigPath)

By("Initializing the workload cluster with older versions of providers")

if input.PreInit != nil {
@@ -243,14 +246,16 @@ }
}

clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
ClusterctlBinaryPath: clusterctlBinaryPath, // use older version of clusterctl to init the management cluster
ClusterProxy: managementClusterProxy,
ClusterctlConfigPath: input.ClusterctlConfigPath,
CoreProvider: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.ClusterAPIProviderName)[0],
BootstrapProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmBootstrapProviderName),
ControlPlaneProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmControlPlaneProviderName),
InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, input.E2EConfig.InfrastructureProviders()...),
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
ClusterctlBinaryPath: clusterctlBinaryPath, // use older version of clusterctl to init the management cluster
ClusterProxy: managementClusterProxy,
ClusterctlConfigPath: clusterctlConfigPath,
CoreProvider: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.ClusterAPIProviderName)[0],
BootstrapProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmBootstrapProviderName),
ControlPlaneProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmControlPlaneProviderName),
InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, input.E2EConfig.InfrastructureProviders()...),
IPAMProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, input.E2EConfig.IPAMProviders()...),
RuntimeExtensionProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, input.E2EConfig.RuntimeExtensionProviders()...),
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)

By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
@@ -282,7 +287,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
// pass reference to the management cluster hosting this test
KubeconfigPath: managementClusterProxy.GetKubeconfigPath(),
// pass the clusterctl config file that points to the local provider repository created for this test,
ClusterctlConfigPath: input.ClusterctlConfigPath,
ClusterctlConfigPath: clusterctlConfigPath,
// select template
Flavor: input.WorkloadFlavor,
// define template variables
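
For context, the new IPAMProviders() and RuntimeExtensionProviders() calls follow the same pattern as the existing InfrastructureProviders() accessor on E2EConfig: each returns the names of the providers of that type declared in the e2e config file, and GetProviderLatestVersionsByContract then resolves those names to concrete versions for the given API contract. The snippet below is only a sketch of that lookup pattern with stand-in types; the field names, type strings, and the providerNames helper are assumptions, not the framework's actual code.

package main

import "fmt"

// Illustrative stand-ins for the framework types; field names are assumptions.
type ProviderConfig struct {
	Name string
	Type string
}

type E2EConfig struct {
	Providers []ProviderConfig
}

// providerNames collects the names of all providers of a given type, mirroring
// what accessors like IPAMProviders() and RuntimeExtensionProviders() return.
func (c *E2EConfig) providerNames(providerType string) []string {
	names := []string{}
	for _, p := range c.Providers {
		if p.Type == providerType {
			names = append(names, p.Name)
		}
	}
	return names
}

func main() {
	cfg := E2EConfig{Providers: []ProviderConfig{
		{Name: "cluster-api", Type: "CoreProvider"},
		{Name: "docker", Type: "InfrastructureProvider"},
		{Name: "test-extension", Type: "RuntimeExtensionProvider"},
	}}
	fmt.Println(cfg.providerNames("RuntimeExtensionProvider")) // prints [test-extension]
}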
9 changes: 8 additions & 1 deletion test/e2e/config/docker.yaml
@@ -201,6 +201,14 @@ providers:
- sourcePath: "../data/infrastructure-docker/v1beta1/main/clusterclass-quick-start-runtimesdk.yaml"
- sourcePath: "../data/shared/v1beta1/main/metadata.yaml"

- name: test-extension
type: RuntimeExtensionProvider
versions:
- name: v1.3.99 # next; use manifest from source files
value: ../../../test/extension/config/default
files:
- sourcePath: "../data/shared/v1beta1/metadata.yaml"

variables:
# Default variables for the e2e test; those values could be overridden via env variables, thus
# allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation.
@@ -220,7 +228,6 @@ variables:
DOCKER_POD_IPV6_CIDRS: "fd00:100:96::/48"
CNI: "./data/cni/kindnet/kindnet.yaml"
KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml"
TEST_EXTENSION: "./data/test-extension/deployment.yaml"
NODE_DRAIN_TIMEOUT: "60s"
# Enabling the feature flags by setting the env variables.
EXP_CLUSTER_RESOURCE_SET: "true"
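
The test-extension entry above is what lets clusterctl install the Runtime Extension during e2e setup in place of the removed test-extension-deployment Makefile target and the TEST_EXTENSION variable. An IPAM provider would be registered the same way; the entry below is purely illustrative (provider name, version, and URL are assumptions, not part of this commit):

  - name: in-cluster
    type: IPAMProvider
    versions:
      - name: v0.1.0   # assumed version; point this at the provider's actual release or source tree
        value: https://github.com/example/ipam-provider/releases/download/v0.1.0/ipam-components.yaml
    files:
      - sourcePath: "../data/shared/v1beta1/metadata.yaml"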
10 changes: 6 additions & 4 deletions test/e2e/e2e_suite_test.go
@@ -240,10 +240,12 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme,

func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
ClusterProxy: bootstrapClusterProxy,
ClusterctlConfigPath: clusterctlConfig,
InfrastructureProviders: config.InfrastructureProviders(),
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterProxy: bootstrapClusterProxy,
ClusterctlConfigPath: clusterctlConfig,
InfrastructureProviders: config.InfrastructureProviders(),
IPAMProviders: config.IPAMProviders(),
RuntimeExtensionProviders: config.RuntimeExtensionProviders(),
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
}

10 changes: 6 additions & 4 deletions test/e2e/self_hosted.go
@@ -147,10 +147,12 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)

By("Initializing the workload cluster")
clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
ClusterProxy: selfHostedClusterProxy,
ClusterctlConfigPath: input.ClusterctlConfigPath,
InfrastructureProviders: input.E2EConfig.InfrastructureProviders(),
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
ClusterProxy: selfHostedClusterProxy,
ClusterctlConfigPath: input.ClusterctlConfigPath,
InfrastructureProviders: input.E2EConfig.InfrastructureProviders(),
IPAMProviders: input.E2EConfig.IPAMProviders(),
RuntimeExtensionProviders: input.E2EConfig.RuntimeExtensionProviders(),
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)

By("Ensure API servers are stable before doing move")
82 changes: 55 additions & 27 deletions test/extension/handlers/lifecycle/handlers.go
@@ -23,12 +23,15 @@ import (

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
)
@@ -42,14 +45,13 @@ type Handler struct {
func (h *Handler) DoBeforeClusterCreate(ctx context.Context, request *runtimehooksv1.BeforeClusterCreateRequest, response *runtimehooksv1.BeforeClusterCreateResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterCreate is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
if err := h.readResponseFromConfigMap(ctx, &request.Cluster, runtimehooksv1.BeforeClusterCreate, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
if err := h.recordCallInConfigMap(ctx, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
if err := h.recordCallInConfigMap(ctx, &request.Cluster, runtimehooksv1.BeforeClusterCreate, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
@@ -59,15 +61,14 @@ func (h *Handler) DoBeforeClusterCreate(ctx context.Context, request *runtimehoo
func (h *Handler) DoBeforeClusterUpgrade(ctx context.Context, request *runtimehooksv1.BeforeClusterUpgradeRequest, response *runtimehooksv1.BeforeClusterUpgradeResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterUpgrade is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
if err := h.readResponseFromConfigMap(ctx, &request.Cluster, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}

if err := h.recordCallInConfigMap(ctx, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
if err := h.recordCallInConfigMap(ctx, &request.Cluster, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
@@ -77,15 +78,14 @@ func (h *Handler) DoBeforeClusterUpgrade(ctx context.Context, request *runtimeho
func (h *Handler) DoAfterControlPlaneInitialized(ctx context.Context, request *runtimehooksv1.AfterControlPlaneInitializedRequest, response *runtimehooksv1.AfterControlPlaneInitializedResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("AfterControlPlaneInitialized is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
if err := h.readResponseFromConfigMap(ctx, &request.Cluster, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}

if err := h.recordCallInConfigMap(ctx, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
if err := h.recordCallInConfigMap(ctx, &request.Cluster, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
@@ -95,15 +95,14 @@ func (h *Handler) DoAfterControlPlaneInitialized(ctx context.Context, request *r
func (h *Handler) DoAfterControlPlaneUpgrade(ctx context.Context, request *runtimehooksv1.AfterControlPlaneUpgradeRequest, response *runtimehooksv1.AfterControlPlaneUpgradeResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("AfterControlPlaneUpgrade is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
if err := h.readResponseFromConfigMap(ctx, &request.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}

if err := h.recordCallInConfigMap(ctx, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
if err := h.recordCallInConfigMap(ctx, &request.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
@@ -113,15 +112,14 @@ func (h *Handler) DoAfterControlPlaneUpgrade(ctx context.Context, request *runti
func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehooksv1.AfterClusterUpgradeRequest, response *runtimehooksv1.AfterClusterUpgradeResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("AfterClusterUpgrade is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
if err := h.readResponseFromConfigMap(ctx, &request.Cluster, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}

if err := h.recordCallInConfigMap(ctx, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
if err := h.recordCallInConfigMap(ctx, &request.Cluster, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
@@ -131,25 +129,33 @@ func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehoo
func (h *Handler) DoBeforeClusterDelete(ctx context.Context, request *runtimehooksv1.BeforeClusterDeleteRequest, response *runtimehooksv1.BeforeClusterDeleteResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterDelete is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Namespace, runtimehooksv1.BeforeClusterDelete, response); err != nil {
if err := h.readResponseFromConfigMap(ctx, &request.Cluster, runtimehooksv1.BeforeClusterDelete, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
if err := h.recordCallInConfigMap(ctx, cluster.Namespace, runtimehooksv1.BeforeClusterDelete, response); err != nil {
if err := h.recordCallInConfigMap(ctx, &request.Cluster, runtimehooksv1.BeforeClusterDelete, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}

// TODO: consider whether to clean up the ConfigMap after gating Cluster deletion.
}

func (h *Handler) readResponseFromConfigMap(ctx context.Context, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
func (h *Handler) readResponseFromConfigMap(ctx context.Context, cluster *clusterv1.Cluster, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
configMapName := "test-extension-hookresponses"
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s", klog.KRef(namespace, configMapName))
configMapName := fmt.Sprintf("%s-test-extension-hookresponses", cluster.Name)
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: configMapName}, configMap); err != nil {
if apierrors.IsNotFound(err) {
configMap = responsesConfigMap(cluster)
if err := h.Client.Create(ctx, configMap); err != nil {
return errors.Wrapf(err, "failed to create the ConfigMap %s", klog.KRef(cluster.Namespace, configMapName))
}
} else {
return errors.Wrapf(err, "failed to read the ConfigMap %s", klog.KRef(cluster.Namespace, configMapName))
}
}
if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response); err != nil {
return errors.Wrapf(err, "failed to read %q response information from ConfigMap", hook)
@@ -161,12 +167,34 @@ func (h *Handler) readResponseFromConfigMap(ctx context.Context, namespace strin
return nil
}

func (h *Handler) recordCallInConfigMap(ctx context.Context, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
// responsesConfigMap generates a ConfigMap with preloaded responses for the test extension.
func responsesConfigMap(cluster *clusterv1.Cluster) *corev1.ConfigMap {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-test-extension-hookresponses", cluster.Name),
Namespace: cluster.Namespace,
},
// Set the initial preloadedResponses for each of the tested hooks.
Data: map[string]string{
// Blocking hooks are set to return RetryAfterSeconds initially. These will be changed during the test.
"BeforeClusterCreate-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
"BeforeClusterUpgrade-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
"AfterControlPlaneUpgrade-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,
"BeforeClusterDelete-preloadedResponse": `{"Status": "Success", "RetryAfterSeconds": 5}`,

// Non-blocking hooks are set to Status:Success.
"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
"AfterClusterUpgrade-preloadedResponse": `{"Status": "Success"}`,
},
}
}

func (h *Handler) recordCallInConfigMap(ctx context.Context, cluster *clusterv1.Cluster, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
configMapName := "test-extension-hookresponses"
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s", klog.KRef(namespace, configMapName))
configMapName := fmt.Sprintf("%s-test-extension-hookresponses", cluster.Name)
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s", klog.KRef(cluster.Namespace, configMapName))
}
var patch client.Patch
if r, ok := response.(runtimehooksv1.RetryResponseObject); ok {
@@ -178,7 +206,7 @@ func (h *Handler) recordCallInConfigMap(ctx context.Context, namespace string, h
[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus":"%s"}}`, hookName, response.GetStatus()))) //nolint:gocritic
}
if err := h.Client.Patch(ctx, configMap, patch); err != nil {
return errors.Wrapf(err, "failed to update the ConfigMap %s", klog.KRef(namespace, configMapName))
return errors.Wrapf(err, "failed to update the ConfigMap %s", klog.KRef(cluster.Namespace, configMapName))
}
return nil
}
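
With the ConfigMap now named per Cluster (<cluster-name>-test-extension-hookresponses) and created on first use, each test Cluster gets its own set of preloaded hook responses, and a test can unblock a lifecycle hook for one specific Cluster by patching only that Cluster's ConfigMap, for example switching BeforeClusterUpgrade from a retrying response to a plain success. The helper below is a hedged sketch of that pattern, not code from this commit; the function name and the way the client and Cluster are obtained are assumptions.

// Sketch: flip a blocking hook's preloaded response to non-blocking for one cluster.
package lifecycle

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func unblockBeforeClusterUpgrade(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) error {
	configMap := &corev1.ConfigMap{}
	configMapName := fmt.Sprintf("%s-test-extension-hookresponses", cluster.Name)
	if err := c.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: configMapName}, configMap); err != nil {
		return err
	}
	// RetryAfterSeconds set to 0 tells the test extension to stop blocking this hook.
	patch := client.RawPatch(types.MergePatchType,
		[]byte(`{"data":{"BeforeClusterUpgrade-preloadedResponse":"{\"Status\": \"Success\", \"RetryAfterSeconds\": 0}"}}`))
	return c.Patch(ctx, configMap, patch)
}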
(The remaining changed files are not rendered here.)
