From 5afd3ac0578694a694fa42bf9d1904023a012ea6 Mon Sep 17 00:00:00 2001
From: shysank
Date: Wed, 24 Feb 2021 11:14:14 -0800
Subject: [PATCH] extra validations for v1alpha3 -> v1alpha4 upgrade

---
 cmd/clusterctl/client/cluster/components.go   |  26 +++
 .../client/cluster/components_test.go         |  36 ++++
 cmd/clusterctl/client/cluster/inventory.go    |  36 ++++
 .../client/cluster/inventory_test.go          |  48 +++++
 cmd/clusterctl/client/cluster/upgrader.go     |  15 ++
 .../client/cluster/upgrader_test.go           | 171 ++++++++++++++++++
 6 files changed, 332 insertions(+)

diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go
index 5e69d6100af7..baa5de10e2d5 100644
--- a/cmd/clusterctl/client/cluster/components.go
+++ b/cmd/clusterctl/client/cluster/components.go
@@ -21,7 +21,9 @@ import (
 	"strings"
 
 	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -49,6 +51,10 @@ type ComponentsClient interface {
 	// it is required to explicitly opt-in for the deletion of the namespace where the provider components are hosted
 	// and for the deletion of the provider's CRDs.
 	Delete(options DeleteOptions) error
+
+	// DeleteWebhookNamespace deletes the core provider webhook namespace (e.g. capi-webhook-system).
+	// This is required when upgrading to v1alpha4, where webhooks are served by the controller itself.
+	DeleteWebhookNamespace() error
 }
 
 // providerComponents implements ComponentsClient.
@@ -211,6 +217,26 @@ func (p *providerComponents) Delete(options DeleteOptions) error {
 	return kerrors.NewAggregate(errList)
 }
 
+func (p *providerComponents) DeleteWebhookNamespace() error {
+	log := logf.Log
+	log.V(5).Info("Deleting", "namespace", repository.WebhookNamespaceName)
+
+	c, err := p.proxy.NewClient()
+	if err != nil {
+		return err
+	}
+
+	coreProviderWebhookNs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: repository.WebhookNamespaceName}}
+	if err := c.Delete(ctx, coreProviderWebhookNs); err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
+		return errors.Wrapf(err, "failed to delete namespace %s", repository.WebhookNamespaceName)
+	}
+
+	return nil
+}
+
 // newComponentsClient returns a providerComponents.
 func newComponentsClient(proxy Proxy) *providerComponents {
 	return &providerComponents{
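
The delete above tolerates a missing namespace so that repeated upgrade runs stay idempotent. For context, here is a minimal standalone sketch of this delete-and-tolerate-NotFound pattern with a controller-runtime client; the package and helper names are illustrative and not part of this patch:

```go
package nsutil

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// DeleteNamespaceIfExists deletes the named namespace, treating
// "not found" as success so the call is safe to repeat.
func DeleteNamespaceIfExists(ctx context.Context, c client.Client, name string) error {
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
	if err := c.Delete(ctx, ns); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
```
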
diff --git a/cmd/clusterctl/client/cluster/components_test.go b/cmd/clusterctl/client/cluster/components_test.go
index 49acd89b6c15..bff66bdcb376 100644
--- a/cmd/clusterctl/client/cluster/components_test.go
+++ b/cmd/clusterctl/client/cluster/components_test.go
@@ -295,3 +295,39 @@ func Test_providerComponents_Delete(t *testing.T) {
 		})
 	}
 }
+
+func Test_providerComponents_DeleteCoreProviderWebhookNamespace(t *testing.T) {
+	t.Run("deletes capi-webhook-system namespace", func(t *testing.T) {
+		g := NewWithT(t)
+		labels := map[string]string{
+			"foo": "bar",
+		}
+		initObjs := []client.Object{
+			&corev1.Namespace{
+				TypeMeta: metav1.TypeMeta{
+					Kind: "Namespace",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "capi-webhook-system",
+					Labels: labels,
+				},
+			},
+		}
+
+		proxy := test.NewFakeProxy().WithObjs(initObjs...)
+		proxyClient, _ := proxy.NewClient()
+		var nsList corev1.NamespaceList
+
+		// assert namespace count before deleting
+		_ = proxyClient.List(ctx, &nsList)
+		g.Expect(nsList.Items).To(HaveLen(1))
+
+		c := newComponentsClient(proxy)
+		err := c.DeleteWebhookNamespace()
+		g.Expect(err).NotTo(HaveOccurred())
+
+		// assert namespace count after deleting
+		_ = proxyClient.List(ctx, &nsList)
+		g.Expect(nsList.Items).To(BeEmpty())
+	})
+}
diff --git a/cmd/clusterctl/client/cluster/inventory.go b/cmd/clusterctl/client/cluster/inventory.go
index 23ced556988c..4fe26844dc8b 100644
--- a/cmd/clusterctl/client/cluster/inventory.go
+++ b/cmd/clusterctl/client/cluster/inventory.go
@@ -24,6 +24,8 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
@@ -109,6 +111,9 @@ type InventoryClient interface {
 	// CheckCAPIContract checks the Cluster API version installed in the management cluster, and fails if this version
 	// does not match the current one supported by clusterctl.
 	CheckCAPIContract(...CheckCAPIContractOption) error
+
+	// CheckSingleProviderInstance ensures that only one instance of each provider is running; it returns an error otherwise.
+	CheckSingleProviderInstance() error
 }
 
 // inventoryClient implements InventoryClient.
@@ -415,3 +420,34 @@ func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption)
 	}
 	return errors.Errorf("failed to check Cluster API version")
 }
+
+func (p *inventoryClient) CheckSingleProviderInstance() error {
+	providers, err := p.List()
+	if err != nil {
+		return err
+	}
+
+	providerGroups := make(map[string][]string)
+	for _, p := range providers.Items {
+		namespacedName := types.NamespacedName{Namespace: p.Namespace, Name: p.Name}.String()
+		if providers, ok := providerGroups[p.ManifestLabel()]; ok {
+			providerGroups[p.ManifestLabel()] = append(providers, namespacedName)
+		} else {
+			providerGroups[p.ManifestLabel()] = []string{namespacedName}
+		}
+	}
+
+	var errs []error
+	for provider, providerInstances := range providerGroups {
+		if len(providerInstances) > 1 {
+			errs = append(errs, errors.Errorf("multiple instances of provider type %q found: %v", provider, providerInstances))
+		}
+	}
+
+	if len(errs) > 0 {
+		return errors.Wrap(kerrors.NewAggregate(errs), "detected multiple instances of the same provider, "+
+			"but clusterctl v1alpha4 does not support this use case. See https://cluster-api.sigs.k8s.io/developer/architecture/controllers/support-multiple-instances.html for more details")
+	}
+
+	return nil
+}
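
CheckSingleProviderInstance is a group-by over the inventory keyed on ManifestLabel(), flagging any key with more than one namespace/name entry (NamespacedName.String() renders as "namespace/name"). A self-contained sketch of the same idea on plain strings; the function name and sample data are illustrative:

```go
package main

import "fmt"

// findDuplicates groups instance names by provider label and returns
// the labels that map to more than one instance.
func findDuplicates(instances map[string]string) map[string][]string {
	groups := map[string][]string{}
	for namespacedName, label := range instances {
		groups[label] = append(groups[label], namespacedName)
	}
	duplicates := map[string][]string{}
	for label, names := range groups {
		if len(names) > 1 {
			duplicates[label] = names
		}
	}
	return duplicates
}

func main() {
	// Two instances of the core provider, one infrastructure provider.
	instances := map[string]string{
		"ns1/foo": "cluster-api",
		"ns2/foo": "cluster-api",
		"ns2/bar": "infrastructure-bar",
	}
	// Prints map[cluster-api:[ns1/foo ns2/foo]] (slice order may vary).
	fmt.Println(findDuplicates(instances))
}
```
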
diff --git a/cmd/clusterctl/client/cluster/inventory_test.go b/cmd/clusterctl/client/cluster/inventory_test.go
index 7f6416e51c96..44e5a492d835 100644
--- a/cmd/clusterctl/client/cluster/inventory_test.go
+++ b/cmd/clusterctl/client/cluster/inventory_test.go
@@ -337,3 +337,51 @@ func Test_CheckCAPIContract(t *testing.T) {
 		})
 	}
 }
+
+func Test_inventoryClient_CheckSingleProviderInstance(t *testing.T) {
+	type fields struct {
+		initObjs []client.Object
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		wantErr bool
+	}{
+		{
+			name: "Returns error when there are multiple instances of the same provider",
+			fields: fields{
+				initObjs: []client.Object{
+					&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns1"}},
+					&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns2"}},
+					&clusterctlv1.Provider{Type: string(clusterctlv1.InfrastructureProviderType), ProviderName: "bar", ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns2"}},
+				},
+			},
+			wantErr: true,
+		},
+		{
+			name: "Does not return error when there is only a single instance of each provider",
+			fields: fields{
+				initObjs: []client.Object{
+					&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns1"}},
+					&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo-1", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns2"}},
+					&clusterctlv1.Provider{Type: string(clusterctlv1.InfrastructureProviderType), ProviderName: "bar", ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns2"}},
+				},
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter)
+			err := p.CheckSingleProviderInstance()
+			if tt.wantErr {
+				g.Expect(err).To(HaveOccurred())
+				return
+			}
+
+			g.Expect(err).NotTo(HaveOccurred())
+		})
+	}
+}
diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go
index 646f425df6c6..ea85f1478815 100644
--- a/cmd/clusterctl/client/cluster/upgrader.go
+++ b/cmd/clusterctl/client/cluster/upgrader.go
@@ -355,6 +355,13 @@ func (u *providerUpgrader) getUpgradeComponents(provider UpgradeItem) (repositor
 }
 
 func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan) error {
+	// Check for multiple instances of the same provider before upgrading to the v1alpha4 contract.
+	if upgradePlan.Contract == clusterv1.GroupVersion.Version {
+		if err := u.providerInventory.CheckSingleProviderInstance(); err != nil {
+			return err
+		}
+	}
+
 	for _, upgradeItem := range upgradePlan.Providers {
 		// If there is not a specified next version, skip it (we are already up-to-date).
 		if upgradeItem.NextVersion == "" {
@@ -381,6 +388,14 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan) error {
 			return err
 		}
 	}
+
+	// Delete the webhook namespace since it is no longer needed as of v1alpha4.
+	if upgradePlan.Contract == clusterv1.GroupVersion.Version {
+		if err := u.providerComponents.DeleteWebhookNamespace(); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
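
Taken together, doUpgrade now brackets the provider upgrades with two steps that only run when the target contract is v1alpha4 (clusterv1.GroupVersion.Version). A simplified sketch of that control flow, with the clusterctl clients reduced to illustrative stub interfaces:

```go
package sketch

// Stand-ins for the clusterctl inventory and components clients; illustrative only.
type inventoryClient interface{ CheckSingleProviderInstance() error }
type componentsClient interface{ DeleteWebhookNamespace() error }

// upgradeToV1alpha4 mirrors the ordering doUpgrade enforces: validate
// before touching any provider, clean up only after all providers are upgraded.
func upgradeToV1alpha4(inv inventoryClient, comp componentsClient, upgradeProviders func() error) error {
	// Fail fast: if any provider has multiple instances, nothing is modified.
	if err := inv.CheckSingleProviderInstance(); err != nil {
		return err
	}
	if err := upgradeProviders(); err != nil {
		return err
	}
	// The shared capi-webhook-system namespace goes away only once every
	// provider is on a version that serves its own webhooks.
	return comp.DeleteWebhookNamespace()
}
```
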
diff --git a/cmd/clusterctl/client/cluster/upgrader_test.go b/cmd/clusterctl/client/cluster/upgrader_test.go
index 89aad8fdebcb..cf3014744709 100644
--- a/cmd/clusterctl/client/cluster/upgrader_test.go
+++ b/cmd/clusterctl/client/cluster/upgrader_test.go
@@ -1246,3 +1246,174 @@ func Test_providerUpgrader_createCustomPlan(t *testing.T) {
 		})
 	}
 }
+
+// TODO add tests for success scenarios.
+func Test_providerUpgrader_ApplyPlan(t *testing.T) {
+	type fields struct {
+		reader     config.Reader
+		repository map[string]repository.Repository
+		proxy      Proxy
+	}
+
+	tests := []struct {
+		name         string
+		fields       fields
+		coreProvider clusterctlv1.Provider
+		contract     string
+		wantErr      bool
+		errorMsg     string
+	}{
+		{
+			name: "fails to upgrade to v1alpha4 when there are multiple instances of the same provider",
+			fields: fields{
+				// config for two providers
+				reader: test.NewFakeReader().
+					WithProvider("cluster-api", clusterctlv1.CoreProviderType, "https://somewhere.com").
+					WithProvider("infra", clusterctlv1.InfrastructureProviderType, "https://somewhere.com"),
+				// two provider repositories, with current v1alpha3 contract and new versions for v1alpha4 contract
+				repository: map[string]repository.Repository{
+					"cluster-api": test.NewFakeRepository().
+						WithVersions("v1.0.0", "v1.0.1", "v2.0.0").
+						WithMetadata("v2.0.0", &clusterctlv1.Metadata{
+							ReleaseSeries: []clusterctlv1.ReleaseSeries{
+								{Major: 1, Minor: 0, Contract: "v1alpha3"},
+								{Major: 2, Minor: 0, Contract: "v1alpha4"},
+							},
+						}),
+					"infrastructure-infra": test.NewFakeRepository().
+						WithVersions("v2.0.0", "v2.0.1", "v3.0.0").
+						WithMetadata("v3.0.0", &clusterctlv1.Metadata{
+							ReleaseSeries: []clusterctlv1.ReleaseSeries{
+								{Major: 2, Minor: 0, Contract: "v1alpha3"},
+								{Major: 3, Minor: 0, Contract: "v1alpha4"},
+							},
+						}),
+				},
+				// two providers with multiple instances existing in the cluster
+				proxy: test.NewFakeProxy().
+					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", "default").
+					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system-1", "default-1").
+					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system", "default").
+					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system-1", "default-1"),
+			},
+			coreProvider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""),
+			contract:     "v1alpha4",
+			wantErr:      true,
+			errorMsg:     "detected multiple instances of the same provider",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			configClient, _ := config.New("", config.InjectReader(tt.fields.reader))
+
+			u := &providerUpgrader{
+				configClient: configClient,
+				repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+					return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
+				},
+				providerInventory: newInventoryClient(tt.fields.proxy, nil),
+			}
+			err := u.ApplyPlan(tt.coreProvider, tt.contract)
+			if tt.wantErr {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring(tt.errorMsg))
+				return
+			}
+
+			g.Expect(err).NotTo(HaveOccurred())
+		})
+	}
+}
+
+// TODO add tests for success scenarios.
+func Test_providerUpgrader_ApplyCustomPlan(t *testing.T) {
+	type fields struct {
+		reader     config.Reader
+		repository map[string]repository.Repository
+		proxy      Proxy
+	}
+
+	tests := []struct {
+		name               string
+		fields             fields
+		coreProvider       clusterctlv1.Provider
+		providersToUpgrade []UpgradeItem
+		wantErr            bool
+		errorMsg           string
+	}{
+		{
+			name: "fails to upgrade to v1alpha4 when there are multiple instances of the same provider",
+			fields: fields{
+				// config for two providers
+				reader: test.NewFakeReader().
+					WithProvider("cluster-api", clusterctlv1.CoreProviderType, "https://somewhere.com").
+					WithProvider("infra", clusterctlv1.InfrastructureProviderType, "https://somewhere.com"),
+				// two provider repositories, with current v1alpha3 contract and new versions for v1alpha4 contract
+				repository: map[string]repository.Repository{
+					"cluster-api": test.NewFakeRepository().
+						WithVersions("v1.0.0", "v1.0.1", "v2.0.0").
+						WithMetadata("v2.0.0", &clusterctlv1.Metadata{
+							ReleaseSeries: []clusterctlv1.ReleaseSeries{
+								{Major: 1, Minor: 0, Contract: "v1alpha3"},
+								{Major: 2, Minor: 0, Contract: "v1alpha4"},
+							},
+						}),
+					"infrastructure-infra": test.NewFakeRepository().
+						WithVersions("v2.0.0", "v2.0.1", "v3.0.0").
+						WithMetadata("v3.0.0", &clusterctlv1.Metadata{
+							ReleaseSeries: []clusterctlv1.ReleaseSeries{
+								{Major: 2, Minor: 0, Contract: "v1alpha3"},
+								{Major: 3, Minor: 0, Contract: "v1alpha4"},
+							},
+						}),
+				},
+				// two providers with multiple instances existing in the cluster
+				proxy: test.NewFakeProxy().
+					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", "default").
+					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system-1", "default-1").
+					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system", "default").
+					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system-1", "default-1"),
+			},
+			coreProvider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""),
+			providersToUpgrade: []UpgradeItem{
+				{
+					Provider:    fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""),
+					NextVersion: "v2.0.0",
+				},
+				{
+					Provider:    fakeProvider("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system", ""),
+					NextVersion: "v3.0.0",
+				},
+			},
+			wantErr:  true,
+			errorMsg: "detected multiple instances of the same provider",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			configClient, _ := config.New("", config.InjectReader(tt.fields.reader))
+
+			u := &providerUpgrader{
+				configClient: configClient,
+				repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+					return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
+				},
+				providerInventory: newInventoryClient(tt.fields.proxy, nil),
+			}
+			err := u.ApplyCustomPlan(tt.coreProvider, tt.providersToUpgrade...)
+			if tt.wantErr {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring(tt.errorMsg))
+				return
+			}
+
+			g.Expect(err).NotTo(HaveOccurred())
+		})
+	}
+}