
Commit

Prepare main branch for v1.6 development
Signed-off-by: Furkat Gofurov <[email protected]>
furkatgofurov7 committed Aug 10, 2023
1 parent 98fa0ca commit 5c4ce8b
Showing 17 changed files with 293 additions and 186 deletions.
12 changes: 6 additions & 6 deletions Makefile
@@ -508,7 +508,7 @@ generate-doctoc:
TRACE=$(TRACE) ./hack/generate-doctoc.sh

.PHONY: generate-e2e-templates
generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.0 v1.3 v1.4 main) ## Generate cluster templates for all versions
generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.0 v1.4 v1.5 main) ## Generate cluster templates for all versions

DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker
INMEMORY_TEMPLATES := test/e2e/data/infrastructure-inmemory
@@ -517,16 +517,16 @@ INMEMORY_TEMPLATES := test/e2e/data/infrastructure-inmemory
generate-e2e-templates-v1.0: $(KUSTOMIZE)
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.0/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.0/cluster-template.yaml

.PHONY: generate-e2e-templates-v1.3
generate-e2e-templates-v1.3: $(KUSTOMIZE)
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.3/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.3/cluster-template.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.3/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.3/cluster-template-topology.yaml

.PHONY: generate-e2e-templates-v1.4
generate-e2e-templates-v1.4: $(KUSTOMIZE)
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.4/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.4/cluster-template.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.4/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.4/cluster-template-topology.yaml

.PHONY: generate-e2e-templates-v1.5
generate-e2e-templates-v1.5: $(KUSTOMIZE)
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.5/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.5/cluster-template.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.5/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.5/cluster-template-topology.yaml

.PHONY: generate-e2e-templates-main
generate-e2e-templates-main: $(KUSTOMIZE)
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template.yaml
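For context, each `generate-e2e-templates-*` target flattens a kustomize directory into a single `cluster-template*.yaml`. A rough, stand-alone sketch of the same step driven from Go follows; the `buildTemplate` helper and its `main` wiring are illustrative, not part of the repository, and it assumes the `kustomize` binary is on `PATH`:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// buildTemplate mirrors one line of the Makefile target above: it flattens a
// kustomize directory into a single cluster-template YAML file.
func buildTemplate(srcDir, outFile string) error {
	// Equivalent to: kustomize build <srcDir> --load-restrictor LoadRestrictionsNone > <outFile>
	cmd := exec.Command("kustomize", "build", srcDir, "--load-restrictor", "LoadRestrictionsNone")
	out, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("kustomize build %s failed: %w", srcDir, err)
	}
	return os.WriteFile(outFile, out, 0o644)
}

func main() {
	// Path taken from the new generate-e2e-templates-v1.5 target above.
	base := "test/e2e/data/infrastructure-docker/v1.5"
	if err := buildTemplate(base+"/cluster-template", base+"/cluster-template.yaml"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```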
12 changes: 6 additions & 6 deletions cmd/clusterctl/hack/create-local-repository.py
@@ -52,36 +52,36 @@
providers = {
'cluster-api': {
'componentsFile': 'core-components.yaml',
'nextVersion': 'v1.5.99',
'nextVersion': 'v1.6.99',
'type': 'CoreProvider',
},
'bootstrap-kubeadm': {
'componentsFile': 'bootstrap-components.yaml',
'nextVersion': 'v1.5.99',
'nextVersion': 'v1.6.99',
'type': 'BootstrapProvider',
'configFolder': 'bootstrap/kubeadm/config/default',
},
'control-plane-kubeadm': {
'componentsFile': 'control-plane-components.yaml',
'nextVersion': 'v1.5.99',
'nextVersion': 'v1.6.99',
'type': 'ControlPlaneProvider',
'configFolder': 'controlplane/kubeadm/config/default',
},
'infrastructure-docker': {
'componentsFile': 'infrastructure-components.yaml',
'nextVersion': 'v1.5.99',
'nextVersion': 'v1.6.99',
'type': 'InfrastructureProvider',
'configFolder': 'test/infrastructure/docker/config/default',
},
'infrastructure-in-memory': {
'componentsFile': 'infrastructure-components.yaml',
'nextVersion': 'v1.5.99',
'nextVersion': 'v1.6.99',
'type': 'InfrastructureProvider',
'configFolder': 'test/infrastructure/inmemory/config/default',
},
'runtime-extension-test': {
'componentsFile': 'runtime-extension-components.yaml',
'nextVersion': 'v1.5.99',
'nextVersion': 'v1.6.99',
'type': 'RuntimeExtensionProvider',
'configFolder': 'test/extension/config/default',
},
5 changes: 3 additions & 2 deletions docs/release/release-tasks.md
@@ -108,7 +108,8 @@ This comes down to changing occurrences of the old version to the new version, e
1. Create a new `v1.4` `metadata.yaml` (`test/e2e/data/shared/v1.4/metadata.yaml`) by copying
`test/e2e/data/shared/main/metadata.yaml`
2. Add the new release to the main `metadata.yaml` (`test/e2e/data/shared/main/metadata.yaml`).
3. Remove old `metadata.yaml`'s that are not used anymore in clusterctl upgrade tests.
3. Add the new release to the root level `metadata.yaml`
4. Remove old `metadata.yaml`'s that are not used anymore in clusterctl upgrade tests.
4. Adjust cluster templates in `test/e2e/data/infrastructure-docker`:
1. Create a new `v1.4` folder. It should be created based on the `main` folder and only contain the templates
we use in the clusterctl upgrade tests (as of today `cluster-template` and `cluster-template-topology`).
@@ -119,8 +120,8 @@ This comes down to changing occurrences of the old version to the new version, e
3. Make sure all tests are green (also run `pull-cluster-api-e2e-full-main` and `pull-cluster-api-e2e-workload-upgrade-1-23-latest-main`).

Prior art:
- 1.3 - https://github.com/kubernetes-sigs/cluster-api/pull/6834/files
- 1.4 - https://github.com/kubernetes-sigs/cluster-api/pull/7692/files
- 1.5 - https://github.com/kubernetes-sigs/cluster-api/pull/8430/files

#### Create a new GitHub milestone for the next release

2 changes: 1 addition & 1 deletion hack/tools/internal/tilt-prepare/main.go
@@ -70,7 +70,7 @@ const (

var (
// Defines the default version to be used for the provider CR if no version is specified in the tilt-provider.yaml|json file.
defaultProviderVersion = "v1.5.99"
defaultProviderVersion = "v1.6.99"

// This data struct mirrors a subset of info from the providers struct in the tilt file
// which is containing "hard-coded" tilt-provider.yaml files for the providers managed in the Cluster API repository.
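`defaultProviderVersion` is the version tilt-prepare falls back to for a provider CR when the corresponding `tilt-provider.yaml|json` file does not pin one, so bumping it keeps local Tilt deployments on the new development version. A minimal sketch of such a fallback, assuming an illustrative `providerConfig` type rather than the actual tilt-prepare data structures:

```go
package main

import "fmt"

// defaultProviderVersion mirrors the constant bumped in the diff above.
const defaultProviderVersion = "v1.6.99"

// providerConfig is an illustrative stand-in for the data parsed from tilt-provider.yaml|json.
type providerConfig struct {
	Name    string
	Version string // empty when the file does not specify a version
}

// effectiveVersion returns the version to use for the provider CR,
// falling back to defaultProviderVersion when none is configured.
func effectiveVersion(p providerConfig) string {
	if p.Version != "" {
		return p.Version
	}
	return defaultProviderVersion
}

func main() {
	fmt.Println(effectiveVersion(providerConfig{Name: "infrastructure-docker"})) // v1.6.99
}
```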
3 changes: 3 additions & 0 deletions metadata.yaml
@@ -6,6 +6,9 @@
apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
kind: Metadata
releaseSeries:
- major: 1
minor: 6
contract: v1beta1
- major: 1
minor: 5
contract: v1beta1
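Each `releaseSeries` entry tells clusterctl which API contract a given major/minor provider release implements; the new entry maps the upcoming 1.6 series to `v1beta1`. A small illustrative sketch of that lookup, using a made-up `contractFor` helper rather than clusterctl's own metadata code:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// releaseSeries mirrors one entry of the releaseSeries list in metadata.yaml.
type releaseSeries struct {
	Major, Minor int
	Contract     string
}

// contractFor returns the contract for a given provider version, e.g. "v1.6.0" -> "v1beta1".
func contractFor(version string, series []releaseSeries) (string, error) {
	if !semver.IsValid(version) {
		return "", fmt.Errorf("invalid version %q", version)
	}
	majorMinor := semver.MajorMinor(version) // e.g. "v1.6"
	for _, s := range series {
		if majorMinor == fmt.Sprintf("v%d.%d", s.Major, s.Minor) {
			return s.Contract, nil
		}
	}
	return "", fmt.Errorf("no release series matches %s", version)
}

func main() {
	series := []releaseSeries{
		{Major: 1, Minor: 6, Contract: "v1beta1"},
		{Major: 1, Minor: 5, Contract: "v1beta1"},
	}
	contract, _ := contractFor("v1.6.0", series)
	fmt.Println(contract) // v1beta1
}
```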
38 changes: 33 additions & 5 deletions test/e2e/clusterctl_upgrade.go
@@ -29,9 +29,11 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/discovery"
"k8s.io/klog/v2"
@@ -334,6 +336,11 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg

By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")

machineCRD := &apiextensionsv1.CustomResourceDefinition{}
if err := managementClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "machines.cluster.x-k8s.io"}, machineCRD); err != nil {
Expect(err).ToNot(HaveOccurred(), "failed to retrieve a machine CRD")
}

Byf("Creating a namespace for hosting the %s test workload cluster", specName)
testNamespace, testCancelWatches = framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
Creator: managementClusterProxy.GetClient(),
@@ -387,13 +394,34 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
input.PreWaitForCluster(managementClusterProxy, testNamespace.Name, workLoadClusterName)
}

// Build GroupVersionKind for Machine resources
machineListGVK := schema.GroupVersionKind{
Group: machineCRD.Spec.Group,
Kind: machineCRD.Spec.Names.ListKind,
}

// Pick the storage version
for _, version := range machineCRD.Spec.Versions {
if version.Storage {
machineListGVK.Version = version.Name
break
}
}

By("Waiting for the machines to exist")
Eventually(func() (int64, error) {
var n int64
machineList := &clusterv1alpha3.MachineList{}
if err := managementClusterProxy.GetClient().List(ctx, machineList, client.InNamespace(testNamespace.Name), client.MatchingLabels{clusterv1.ClusterNameLabel: workLoadClusterName}); err == nil {
for _, machine := range machineList.Items {
if machine.Status.NodeRef != nil {
machineList := &unstructured.UnstructuredList{}
machineList.SetGroupVersionKind(machineListGVK)
if err := managementClusterProxy.GetClient().List(
ctx,
machineList,
client.InNamespace(testNamespace.Name),
client.MatchingLabels{clusterv1.ClusterNameLabel: workLoadClusterName},
); err == nil {
for _, m := range machineList.Items {
_, found, err := unstructured.NestedMap(m.Object, "status", "nodeRef")
if err == nil && found {
n++
}
}
@@ -411,7 +439,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
// Get the workloadCluster before the management cluster is upgraded to make sure that the upgrade did not trigger
// any unexpected rollouts.
preUpgradeMachineList := &unstructured.UnstructuredList{}
preUpgradeMachineList.SetGroupVersionKind(clusterv1alpha3.GroupVersion.WithKind("MachineList"))
preUpgradeMachineList.SetGroupVersionKind(machineListGVK)
err = managementClusterProxy.GetClient().List(
ctx,
preUpgradeMachineList,
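The change above stops hard-coding `clusterv1alpha3.MachineList` and instead discovers whichever API version the management cluster's Machine CRD stores, then lists Machines as unstructured objects. A self-contained sketch of that pattern is below; the `listMachines` helper is illustrative and only assumes a controller-runtime client:

```go
package e2e

import (
	"context"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listMachines lists Machine objects in the given namespace using the storage
// version advertised by the machines.cluster.x-k8s.io CRD, so the caller does
// not need a compile-time dependency on any specific Cluster API version.
func listMachines(ctx context.Context, c client.Client, namespace string) (*unstructured.UnstructuredList, error) {
	// Fetch the Machine CRD installed on the (possibly older) management cluster.
	crd := &apiextensionsv1.CustomResourceDefinition{}
	if err := c.Get(ctx, client.ObjectKey{Name: "machines.cluster.x-k8s.io"}, crd); err != nil {
		return nil, fmt.Errorf("failed to retrieve the Machine CRD: %w", err)
	}

	// Build the list GVK from the CRD, picking the storage version.
	gvk := schema.GroupVersionKind{
		Group: crd.Spec.Group,
		Kind:  crd.Spec.Names.ListKind,
	}
	for _, v := range crd.Spec.Versions {
		if v.Storage {
			gvk.Version = v.Name
			break
		}
	}

	// List Machines as unstructured objects using the discovered GVK.
	machines := &unstructured.UnstructuredList{}
	machines.SetGroupVersionKind(gvk)
	if err := c.List(ctx, machines, client.InNamespace(namespace)); err != nil {
		return nil, err
	}
	return machines, nil
}
```

Each returned item can then be checked for a `status.nodeRef` with `unstructured.NestedMap(m.Object, "status", "nodeRef")`, as the updated Eventually block does.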
64 changes: 28 additions & 36 deletions test/e2e/clusterctl_upgrade_test.go
@@ -66,7 +66,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() {
})
})

var _ = Describe("When testing clusterctl upgrades (v1.3=>current)", func() {
var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
return ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
@@ -75,21 +75,17 @@ var _ = Describe("When testing clusterctl upgrades (v1.3=>current)", func() {
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InfrastructureProvider: pointer.String("docker"),
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.6/clusterctl-{OS}-{ARCH}",
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.5/clusterctl-{OS}-{ARCH}",
// We have to pin the providers because with `InitWithProvidersContract` the test would
// use the latest version for the contract (which is v1.4.X for v1beta1).
InitWithCoreProvider: "cluster-api:v1.3.6",
InitWithBootstrapProviders: []string{"kubeadm:v1.3.6"},
InitWithControlPlaneProviders: []string{"kubeadm:v1.3.6"},
InitWithInfrastructureProviders: []string{"docker:v1.3.6"},
// We have to set this to an empty array as clusterctl v1.3 doesn't support
// runtime extension providers. If we don't do this the test will automatically
// try to deploy the latest version of our test-extension from docker.yaml.
InitWithRuntimeExtensionProviders: []string{},
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.3/bases.
InitWithKubernetesVersion: "v1.26.4",
WorkloadKubernetesVersion: "v1.26.4",
// use the latest version for the contract (which is v1.5.X for v1beta1).
InitWithCoreProvider: "cluster-api:v1.4.5",
InitWithBootstrapProviders: []string{"kubeadm:v1.4.5"},
InitWithControlPlaneProviders: []string{"kubeadm:v1.4.5"},
InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases.
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
WorkloadFlavor: "",
// This check ensures that ownerReference apiVersions are updated for all types after the upgrade.
@@ -107,7 +103,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.3=>current)", func() {
})
})

var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.3=>current) [ClusterClass]", func() {
var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>current) [ClusterClass]", func() {
ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
return ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
@@ -116,21 +112,17 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.3=>cur
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InfrastructureProvider: pointer.String("docker"),
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.6/clusterctl-{OS}-{ARCH}",
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.5/clusterctl-{OS}-{ARCH}",
// We have to pin the providers because with `InitWithProvidersContract` the test would
// use the latest version for the contract (which is v1.4.X for v1beta1).
InitWithCoreProvider: "cluster-api:v1.3.6",
InitWithBootstrapProviders: []string{"kubeadm:v1.3.6"},
InitWithControlPlaneProviders: []string{"kubeadm:v1.3.6"},
InitWithInfrastructureProviders: []string{"docker:v1.3.6"},
// We have to set this to an empty array as clusterctl v1.3 doesn't support
// runtime extension providers. If we don't do this the test will automatically
// try to deploy the latest version of our test-extension from docker.yaml.
InitWithRuntimeExtensionProviders: []string{},
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.3/bases.
InitWithKubernetesVersion: "v1.26.4",
WorkloadKubernetesVersion: "v1.26.4",
// use the latest version for the contract (which is v1.5.X for v1beta1).
InitWithCoreProvider: "cluster-api:v1.4.5",
InitWithBootstrapProviders: []string{"kubeadm:v1.4.5"},
InitWithControlPlaneProviders: []string{"kubeadm:v1.4.5"},
InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases.
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
// This check ensures that ownerReference apiVersions are updated for all types after the upgrade.
@@ -148,7 +140,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.3=>cur
})
})

var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() {
ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
return ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
@@ -157,9 +149,9 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InfrastructureProvider: pointer.String("docker"),
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-{OS}-{ARCH}",
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.4/bases.
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases.
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
@@ -179,7 +171,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
})
})

var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>current) [ClusterClass]", func() {
var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.5=>current) [ClusterClass]", func() {
ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
return ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
@@ -188,9 +180,9 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>cur
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InfrastructureProvider: pointer.String("docker"),
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-{OS}-{ARCH}",
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.4/bases.
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases.
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",