Skip to content

Commit

Permalink
Merge pull request #4230 from shysank/feature/4194
Browse files Browse the repository at this point in the history
✨ Extra validations for v1alpha3 -> v1alpha4 upgrade
  • Loading branch information
k8s-ci-robot authored Apr 6, 2021
2 parents c7b22a6 + 5afd3ac commit c5b434a
Show file tree
Hide file tree
Showing 6 changed files with 332 additions and 0 deletions.
26 changes: 26 additions & 0 deletions cmd/clusterctl/client/cluster/components.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,9 @@ import (
"strings"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
Expand Down Expand Up @@ -49,6 +51,10 @@ type ComponentsClient interface {
// it is required to explicitly opt-in for the deletion of the namespace where the provider components are hosted
// and for the deletion of the provider's CRDs.
Delete(options DeleteOptions) error

// DeleteWebhookNamespace deletes the core provider webhook namespace (eg. capi-webhook-system).
// This is required when upgrading to v1alpha4 where webhooks are included in the controller itself.
DeleteWebhookNamespace() error
}

// providerComponents implements ComponentsClient.
Expand Down Expand Up @@ -211,6 +217,26 @@ func (p *providerComponents) Delete(options DeleteOptions) error {
return kerrors.NewAggregate(errList)
}

// DeleteWebhookNamespace deletes the core provider webhook namespace (eg. capi-webhook-system).
// This is required when upgrading to v1alpha4 where webhooks are included in the controller itself.
// A missing namespace is not an error (e.g. it was already removed by a previous upgrade).
func (p *providerComponents) DeleteWebhookNamespace() error {
	log := logf.Log
	// NOTE: logr's Info does not do printf-style formatting; the message is fixed text and
	// variable data is passed as key/value pairs. The original `Info("Deleting %s namespace", name)`
	// would log the literal "%s" and attach a dangling, keyless value.
	log.V(5).Info("Deleting namespace", "Namespace", repository.WebhookNamespaceName)

	c, err := p.proxy.NewClient()
	if err != nil {
		return err
	}

	coreProviderWebhookNs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: repository.WebhookNamespaceName}}
	if err := c.Delete(ctx, coreProviderWebhookNs); err != nil {
		// Tolerate a namespace that is already gone so the operation is idempotent.
		if apierrors.IsNotFound(err) {
			return nil
		}
		return errors.Wrapf(err, "failed to delete namespace %s", repository.WebhookNamespaceName)
	}

	return nil
}

// newComponentsClient returns a providerComponents.
func newComponentsClient(proxy Proxy) *providerComponents {
return &providerComponents{
Expand Down
36 changes: 36 additions & 0 deletions cmd/clusterctl/client/cluster/components_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -295,3 +295,39 @@ func Test_providerComponents_Delete(t *testing.T) {
})
}
}

// Test_providerComponents_DeleteCoreProviderWebhookNamespace verifies that
// DeleteWebhookNamespace removes the capi-webhook-system namespace from the cluster.
func Test_providerComponents_DeleteCoreProviderWebhookNamespace(t *testing.T) {
	t.Run("deletes capi-webhook-system namespace", func(t *testing.T) {
		g := NewWithT(t)
		labels := map[string]string{
			"foo": "bar",
		}
		initObjs := []client.Object{
			&corev1.Namespace{
				TypeMeta: metav1.TypeMeta{
					Kind: "Namespace",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:   "capi-webhook-system",
					Labels: labels,
				},
			},
		}

		proxy := test.NewFakeProxy().WithObjs(initObjs...)
		// Assert the error instead of discarding it: a broken fake proxy would otherwise
		// surface later as a confusing nil-pointer failure.
		proxyClient, err := proxy.NewClient()
		g.Expect(err).NotTo(HaveOccurred())
		var nsList corev1.NamespaceList

		// Assert the namespace exists before deleting; ignoring the List error could make
		// the final "namespace gone" assertion pass vacuously.
		g.Expect(proxyClient.List(ctx, &nsList)).To(Succeed())
		g.Expect(nsList.Items).To(HaveLen(1))

		c := newComponentsClient(proxy)
		g.Expect(c.DeleteWebhookNamespace()).To(Succeed())

		// Assert the namespace is gone after deleting.
		g.Expect(proxyClient.List(ctx, &nsList)).To(Succeed())
		g.Expect(nsList.Items).To(BeEmpty())
	})
}
36 changes: 36 additions & 0 deletions cmd/clusterctl/client/cluster/inventory.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
Expand Down Expand Up @@ -107,6 +109,9 @@ type InventoryClient interface {
// CheckCAPIContract checks the Cluster API version installed in the management cluster, and fails if this version
// does not match the current one supported by clusterctl.
CheckCAPIContract(...CheckCAPIContractOption) error

// CheckSingleProviderInstance ensures that only one instance of a provider is running, returns error otherwise.
CheckSingleProviderInstance() error
}

// inventoryClient implements InventoryClient.
Expand Down Expand Up @@ -407,3 +412,34 @@ func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption)
}
return errors.Errorf("failed to check Cluster API version")
}

// CheckSingleProviderInstance ensures that only one instance of each provider (grouped by
// the provider's manifest label) is installed in the cluster; it returns an aggregate error
// listing every offending provider and its instances otherwise.
func (p *inventoryClient) CheckSingleProviderInstance() error {
	providers, err := p.List()
	if err != nil {
		return err
	}

	// Group the namespace/name of every provider instance by the provider's manifest label.
	// Indexing a map on a missing key yields the zero value (a nil slice) and append on a
	// nil slice allocates a new one, so the explicit existence check the original carried
	// is unnecessary.
	providerGroups := make(map[string][]string)
	for _, p := range providers.Items {
		namespacedName := types.NamespacedName{Namespace: p.Namespace, Name: p.Name}.String()
		providerGroups[p.ManifestLabel()] = append(providerGroups[p.ManifestLabel()], namespacedName)
	}

	var errs []error
	for provider, providerInstances := range providerGroups {
		if len(providerInstances) > 1 {
			// NOTE(review): map iteration order is random, so when several providers offend
			// the order of the aggregated errors may vary between runs.
			errs = append(errs, errors.Errorf("multiple instances of provider type %q found: %v", provider, providerInstances))
		}
	}

	if len(errs) > 0 {
		return errors.Wrap(kerrors.NewAggregate(errs), "detected multiple instances of the same provider, "+
			"but clusterctl v1alpha4 does not support this use case. See https://cluster-api.sigs.k8s.io/developer/architecture/controllers/support-multiple-instances.html for more details")
	}

	return nil
}
48 changes: 48 additions & 0 deletions cmd/clusterctl/client/cluster/inventory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -337,3 +337,51 @@ func Test_CheckCAPIContract(t *testing.T) {
})
}
}

// Test_inventoryClient_CheckSingleProviderInstance verifies that CheckSingleProviderInstance
// returns an error when the same provider is installed more than once, and succeeds when
// every provider has exactly one instance.
func Test_inventoryClient_CheckSingleProviderInstance(t *testing.T) {
	testCases := []struct {
		name     string
		initObjs []client.Object
		wantErr  bool
	}{
		{
			name: "Returns error when there are multiple instances of the same provider",
			initObjs: []client.Object{
				&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns1"}},
				&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns2"}},
				&clusterctlv1.Provider{Type: string(clusterctlv1.InfrastructureProviderType), ProviderName: "bar", ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns2"}},
			},
			wantErr: true,
		},
		{
			name: "Does not return error when there is only single instance of all providers",
			initObjs: []client.Object{
				&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns1"}},
				&clusterctlv1.Provider{Type: string(clusterctlv1.CoreProviderType), ProviderName: "foo-1", ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns2"}},
				&clusterctlv1.Provider{Type: string(clusterctlv1.InfrastructureProviderType), ProviderName: "bar", ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns2"}},
			},
			wantErr: false,
		},
	}
	for i := range testCases {
		tc := testCases[i]
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)

			inventory := newInventoryClient(test.NewFakeProxy().WithObjs(tc.initObjs...), fakePollImmediateWaiter)
			if err := inventory.CheckSingleProviderInstance(); tc.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).NotTo(HaveOccurred())
			}
		})
	}
}
15 changes: 15 additions & 0 deletions cmd/clusterctl/client/cluster/upgrader.go
Original file line number Diff line number Diff line change
Expand Up @@ -355,6 +355,13 @@ func (u *providerUpgrader) getUpgradeComponents(provider UpgradeItem) (repositor
}

func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan) error {
// Check for multiple instances of the same provider if current contract is v1alpha3.
if upgradePlan.Contract == clusterv1.GroupVersion.Version {
if err := u.providerInventory.CheckSingleProviderInstance(); err != nil {
return err
}
}

for _, upgradeItem := range upgradePlan.Providers {
// If there is not a specified next version, skip it (we are already up-to-date).
if upgradeItem.NextVersion == "" {
Expand All @@ -381,6 +388,14 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan) error {
return err
}
}

// Delete webhook namespace since it's not needed from v1alpha4.
if upgradePlan.Contract == clusterv1.GroupVersion.Version {
if err := u.providerComponents.DeleteWebhookNamespace(); err != nil {
return err
}
}

return nil
}

Expand Down
171 changes: 171 additions & 0 deletions cmd/clusterctl/client/cluster/upgrader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1246,3 +1246,174 @@ func Test_providerUpgrader_createCustomPlan(t *testing.T) {
})
}
}

// TODO add tests for success scenarios.
// Test_providerUpgrader_ApplyPlan verifies that ApplyPlan fails with the expected error
// when upgrading to the v1alpha4 contract while multiple instances of the same provider
// are installed in the cluster.
func Test_providerUpgrader_ApplyPlan(t *testing.T) {
	type fields struct {
		reader     config.Reader
		repository map[string]repository.Repository
		proxy      Proxy
	}

	tests := []struct {
		name         string
		fields       fields
		coreProvider clusterctlv1.Provider
		contract     string
		wantErr      bool
		errorMsg     string
	}{
		{
			name: "fails to upgrade to v1alpha4 when there are multiple instances of the same provider",
			fields: fields{
				// config for two providers
				reader: test.NewFakeReader().
					WithProvider("cluster-api", clusterctlv1.CoreProviderType, "https://somewhere.com").
					WithProvider("infra", clusterctlv1.InfrastructureProviderType, "https://somewhere.com"),
				// two provider repositories, with current v1alpha3 contract and new versions for v1alpha4 contract
				repository: map[string]repository.Repository{
					"cluster-api": test.NewFakeRepository().
						WithVersions("v1.0.0", "v1.0.1", "v2.0.0").
						WithMetadata("v2.0.0", &clusterctlv1.Metadata{
							ReleaseSeries: []clusterctlv1.ReleaseSeries{
								{Major: 1, Minor: 0, Contract: "v1alpha3"},
								{Major: 2, Minor: 0, Contract: "v1alpha4"},
							},
						}),
					"infrastructure-infra": test.NewFakeRepository().
						WithVersions("v2.0.0", "v2.0.1", "v3.0.0").
						WithMetadata("v3.0.0", &clusterctlv1.Metadata{
							ReleaseSeries: []clusterctlv1.ReleaseSeries{
								{Major: 2, Minor: 0, Contract: "v1alpha3"},
								{Major: 3, Minor: 0, Contract: "v1alpha4"},
							},
						}),
				},
				// two providers with multiple instances existing in the cluster
				proxy: test.NewFakeProxy().
					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", "default").
					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system-1", "default-1").
					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system", "default").
					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system-1", "default-1"),
			},
			coreProvider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""),
			contract:     "v1alpha4",
			wantErr:      true,
			errorMsg:     "detected multiple instances of the same provider",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Assert the error instead of discarding it: a bad fake reader would otherwise
			// surface later as a confusing nil-pointer failure inside the upgrader.
			configClient, err := config.New("", config.InjectReader(tt.fields.reader))
			g.Expect(err).NotTo(HaveOccurred())

			u := &providerUpgrader{
				configClient: configClient,
				repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
					return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
				},
				providerInventory: newInventoryClient(tt.fields.proxy, nil),
			}
			err = u.ApplyPlan(tt.coreProvider, tt.contract)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				g.Expect(err.Error()).Should(ContainSubstring(tt.errorMsg))
				return
			}

			g.Expect(err).NotTo(HaveOccurred())
		})
	}
}

// TODO add tests for success scenarios.
// Test_providerUpgrader_ApplyCustomPlan verifies that ApplyCustomPlan fails with the
// expected error when the requested upgrade targets the v1alpha4 contract while multiple
// instances of the same provider are installed in the cluster.
func Test_providerUpgrader_ApplyCustomPlan(t *testing.T) {
	type fields struct {
		reader     config.Reader
		repository map[string]repository.Repository
		proxy      Proxy
	}

	tests := []struct {
		name               string
		fields             fields
		coreProvider       clusterctlv1.Provider
		providersToUpgrade []UpgradeItem
		wantErr            bool
		errorMsg           string
	}{
		{
			name: "fails to upgrade to v1alpha4 when there are multiple instances of the same provider",
			fields: fields{
				// config for two providers
				reader: test.NewFakeReader().
					WithProvider("cluster-api", clusterctlv1.CoreProviderType, "https://somewhere.com").
					WithProvider("infra", clusterctlv1.InfrastructureProviderType, "https://somewhere.com"),
				// two provider repositories, with current v1alpha3 contract and new versions for v1alpha4 contract
				repository: map[string]repository.Repository{
					"cluster-api": test.NewFakeRepository().
						WithVersions("v1.0.0", "v1.0.1", "v2.0.0").
						WithMetadata("v2.0.0", &clusterctlv1.Metadata{
							ReleaseSeries: []clusterctlv1.ReleaseSeries{
								{Major: 1, Minor: 0, Contract: "v1alpha3"},
								{Major: 2, Minor: 0, Contract: "v1alpha4"},
							},
						}),
					"infrastructure-infra": test.NewFakeRepository().
						WithVersions("v2.0.0", "v2.0.1", "v3.0.0").
						WithMetadata("v3.0.0", &clusterctlv1.Metadata{
							ReleaseSeries: []clusterctlv1.ReleaseSeries{
								{Major: 2, Minor: 0, Contract: "v1alpha3"},
								{Major: 3, Minor: 0, Contract: "v1alpha4"},
							},
						}),
				},
				// two providers with multiple instances existing in the cluster
				proxy: test.NewFakeProxy().
					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", "default").
					WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system-1", "default-1").
					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system", "default").
					WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system-1", "default-1"),
			},
			coreProvider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""),
			providersToUpgrade: []UpgradeItem{
				{
					Provider:    fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""),
					NextVersion: "v2.0.0",
				},
				{
					Provider:    fakeProvider("infra", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra-system", ""),
					NextVersion: "v3.0.0",
				},
			},
			wantErr:  true,
			errorMsg: "detected multiple instances of the same provider",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Assert the error instead of discarding it: a bad fake reader would otherwise
			// surface later as a confusing nil-pointer failure inside the upgrader.
			configClient, err := config.New("", config.InjectReader(tt.fields.reader))
			g.Expect(err).NotTo(HaveOccurred())

			u := &providerUpgrader{
				configClient: configClient,
				repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
					return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
				},
				providerInventory: newInventoryClient(tt.fields.proxy, nil),
			}
			err = u.ApplyCustomPlan(tt.coreProvider, tt.providersToUpgrade...)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				g.Expect(err.Error()).Should(ContainSubstring(tt.errorMsg))
				return
			}

			g.Expect(err).NotTo(HaveOccurred())
		})
	}
}

0 comments on commit c5b434a

Please sign in to comment.