Skip to content

Commit

Permalink
Merge pull request #4668 from fabriziopandini/remove-clusterctl-manag…
Browse files Browse the repository at this point in the history
…ement-groups

⚠️ Remove clusterctl management groups
  • Loading branch information
k8s-ci-robot authored May 27, 2021
2 parents 3659f22 + aba35d7 commit 24ea203
Show file tree
Hide file tree
Showing 21 changed files with 297 additions and 1,087 deletions.
5 changes: 0 additions & 5 deletions cmd/clusterctl/api/v1alpha3/provider_type.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,11 +68,6 @@ func (p *Provider) InstanceName() string {
return types.NamespacedName{Namespace: p.Namespace, Name: p.ManifestLabel()}.String()
}

// HasWatchingOverlapWith returns true if the provider has an overlapping watching namespace with another provider.
func (p *Provider) HasWatchingOverlapWith(other Provider) bool {
return p.WatchedNamespace == "" || p.WatchedNamespace == other.WatchedNamespace || other.WatchedNamespace == ""
}

// SameAs returns true if two providers have the same ProviderName and Type.
// Please note that there could be many instances of the same provider.
func (p *Provider) SameAs(other Provider) bool {
Expand Down
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/alias.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ type ComponentsOptions repository.ComponentsOptions
// Template wraps a YAML file that defines the cluster objects (Cluster, Machines etc.).
type Template repository.Template

// UpgradePlan defines a list of possible upgrade targets for a management group.
// UpgradePlan defines a list of possible upgrade targets for a management cluster.
type UpgradePlan cluster.UpgradePlan

// CertManagerUpgradePlan defines the upgrade plan if cert-manager needs to be
Expand Down
6 changes: 2 additions & 4 deletions cmd/clusterctl/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,8 @@ type Client interface {
Move(options MoveOptions) error

// PlanUpgrade returns a set of suggested Upgrade plans for the cluster, and more specifically:
// - Each management group gets separated upgrade plans.
// - For each management group, an upgrade plan is generated for each API Version of Cluster API (contract) available, e.g.
// - Upgrade to the latest version in the v1alpha2 series: ....
// - Upgrade to the latest version in the v1alpha3 series: ....
// - Upgrade to the latest version in the v1alpha3 series: ....
// - Upgrade to the latest version in the v1alpha4 series: ....
PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error)

// PlanCertManagerUpgrade returns a CertManagerUpgradePlan.
Expand Down
62 changes: 20 additions & 42 deletions cmd/clusterctl/client/cluster/installer.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,8 @@ type ProviderInstaller interface {

// Validate performs steps to validate a management cluster by looking at the current state and the providers in the queue.
// The following checks are performed in order to ensure a fully operational cluster:
// - There must be only one instance of the same provider per namespace
// - Instances of the same provider must not be fighting for objects (no watching overlap)
// - Providers must combine in valid management groups
// - All the providers must belong to one/only one management groups
// - All the providers in a management group must support the same API Version of Cluster API (contract)
// - There must be only one instance of the same provider
// - All the providers must support the same API Version of Cluster API (contract)
Validate() error

// Images returns the list of images required for installing the providers ready in the install queue.
Expand Down Expand Up @@ -167,43 +164,39 @@ func (i *providerInstaller) Validate() error {

// Starts simulating what will be the resulting management cluster by adding to the list the providers in the installQueue.
// During this operation following checks are performed:
// - There must be only one instance of the same provider per namespace
// - Instances of the same provider must not be fighting for objects (no watching overlap)
// - There must be only one instance of the same provider
for _, components := range i.installQueue {
if providerList, err = simulateInstall(providerList, components); err != nil {
return errors.Wrapf(err, "installing provider %q can lead to a non functioning management cluster", components.ManifestLabel())
}
}

// Now that the provider list contains all the providers that are scheduled for install, gets the resulting management groups.
// During this operation following check is performed:
// - Providers must combine in valid management groups
// - All the providers must belong to one/only one management group
managementGroups, err := deriveManagementGroups(providerList)
// Gets the API Version of Cluster API (contract) all the providers in the management cluster must support,
// which is the same as the core provider's.
providerInstanceContracts := map[string]string{}

coreProviders := providerList.FilterCore()
if len(coreProviders) != 1 {
return errors.Errorf("invalid management cluster: there should be a core provider, found %d", len(coreProviders))
}
coreProvider := coreProviders[0]

managementClusterContract, err := i.getProviderContract(providerInstanceContracts, coreProvider)
if err != nil {
return err
}

// Checks if all the providers supports the same API Version of Cluster API (contract) of the corresponding management group.
providerInstanceContracts := map[string]string{}
// Checks if all the providers support the same API Version of Cluster API (contract).
for _, components := range i.installQueue {
provider := components.InventoryObject()

// Gets the management group the providers belongs to, and then retrieve the API Version of Cluster API (contract)
// all the providers in the management group must support.
managementGroup := managementGroups.FindManagementGroupByProviderInstanceName(provider.InstanceName())
managementGroupContract, err := i.getProviderContract(providerInstanceContracts, managementGroup.CoreProvider)
if err != nil {
return err
}

// Gets the API Version of Cluster API (contract) the provider support and compare it with the management group contract.
// Gets the API Version of Cluster API (contract) the provider supports and compares it with the management cluster contract.
providerContract, err := i.getProviderContract(providerInstanceContracts, provider)
if err != nil {
return err
}
if providerContract != managementGroupContract {
return errors.Errorf("installing provider %q can lead to a non functioning management cluster: the target version for the provider supports the %s API Version of Cluster API (contract), while the management group is using %s", components.ManifestLabel(), providerContract, managementGroupContract)
if providerContract != managementClusterContract {
return errors.Errorf("installing provider %q can lead to a non functioning management cluster: the target version for the provider supports the %s API Version of Cluster API (contract), while the management cluster is using %s", components.ManifestLabel(), providerContract, managementClusterContract)
}
}
return nil
Expand Down Expand Up @@ -258,26 +251,11 @@ func simulateInstall(providerList *clusterctlv1.ProviderList, components reposit
provider := components.InventoryObject()

existingInstances := providerList.FilterByProviderNameAndType(provider.ProviderName, provider.GetProviderType())

// Target Namespace check
// Installing two instances of the same provider in the same namespace won't be supported
for _, i := range existingInstances {
if i.Namespace == provider.Namespace {
return providerList, errors.Errorf("there is already an instance of the %q provider installed in the %q namespace", provider.ManifestLabel(), provider.Namespace)
}
}

// Watching Namespace check:
// If we are going to install an instance of a provider watching objects in namespaces already controlled by other providers
// then there will be providers fighting for objects...
for _, i := range existingInstances {
if i.HasWatchingOverlapWith(provider) {
return providerList, errors.Errorf("the new instance of the %q provider is going to watch for objects in the namespace %q that is already controlled by other instances of the same provider", provider.ManifestLabel(), provider.WatchedNamespace)
}
if len(existingInstances) > 0 {
return providerList, errors.Errorf("there is already an instance of the %q provider installed in the %q namespace", provider.ManifestLabel(), provider.Namespace)
}

providerList.Items = append(providerList.Items, provider)

return providerList, nil
}

Expand Down
38 changes: 7 additions & 31 deletions cmd/clusterctl/client/cluster/installer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,16 +128,16 @@ func Test_providerInstaller_Validate(t *testing.T) {
wantErr: false,
},
{
name: "install another instance of infra1/current contract on a cluster already initialized with core/current contract + infra1/current contract, no overlaps",
name: "install another instance of infra1/current contract on a cluster already initialized with core/current contract + infra1/current contract",
fields: fields{
proxy: test.NewFakeProxy().
WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", "").
WithProviderInventory("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "ns1", "ns1"),
WithProviderInventory("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "ns1", ""),
installQueue: []repository.Components{
newFakeComponents("infra2", clusterctlv1.InfrastructureProviderType, "v1.0.0", "ns2", "ns2"),
newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "ns2", ""),
},
},
wantErr: false,
wantErr: true,
},
{
name: "install another instance of infra1/current contract on a cluster already initialized with core/current contract + infra1/current contract, same namespace of the existing infra1",
Expand All @@ -152,37 +152,13 @@ func Test_providerInstaller_Validate(t *testing.T) {
wantErr: true,
},
{
name: "install another instance of infra1/current contract on a cluster already initialized with core/current contract + infra1/current contract, watching overlap with the existing infra1",
name: "install another instance of infra1/current contract on a cluster already initialized with core/current contract + infra1/current contract, different namespace of the existing infra1",
fields: fields{
proxy: test.NewFakeProxy().
WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", "").
WithProviderInventory("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra1-system", ""),
installQueue: []repository.Components{
newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra2-system", ""),
},
},
wantErr: true,
},
{
name: "install another instance of infra1/current contract on a cluster already initialized with core/current contract + infra1/current contract, not part of the existing management group",
fields: fields{
proxy: test.NewFakeProxy().
WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "ns1", "ns1").
WithProviderInventory("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "ns1", "ns1"),
installQueue: []repository.Components{
newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "ns2", "ns2"),
},
},
wantErr: true,
},
{
name: "install an instance of infra1/current contract on a cluster already initialized with two core/current contract, but it is part of two management group",
fields: fields{
proxy: test.NewFakeProxy(). // cluster with two core (two management groups)
WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "ns1", "ns1").
WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "ns2", "ns2"),
WithProviderInventory("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "n1", ""),
installQueue: []repository.Components{
newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra1-system", ""),
newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "n2", ""),
},
},
wantErr: true,
Expand Down
3 changes: 0 additions & 3 deletions cmd/clusterctl/client/cluster/inventory.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,6 @@ type InventoryClient interface {
// this as the default namespace; In case there are more instances for the same provider installed in different namespaces, there is no default provider namespace.
GetDefaultProviderNamespace(provider string, providerType clusterctlv1.ProviderType) (string, error)

// GetManagementGroups returns the list of management groups defined in the management cluster.
GetManagementGroups() (ManagementGroupList, error)

// CheckCAPIContract checks the Cluster API version installed in the management cluster, and fails if this version
// does not match the current one supported by clusterctl.
CheckCAPIContract(...CheckCAPIContractOption) error
Expand Down
154 changes: 0 additions & 154 deletions cmd/clusterctl/client/cluster/inventory_managementgroup.go

This file was deleted.

Loading

0 comments on commit 24ea203

Please sign in to comment.