🐛 Add kubernetes server validation on clusterctl #2842

Merged
7 changes: 7 additions & 0 deletions cmd/clusterctl/client/cluster/client.go
@@ -30,6 +30,10 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
minimumKubernetesVersion = "v1.16.0"
)

var (
ctx = context.TODO()
)
@@ -187,6 +191,9 @@ type Proxy interface
	// CurrentNamespace returns the namespace from the current context in the kubeconfig file
	CurrentNamespace() (string, error)

	// ValidateKubernetesVersion returns an error if the management cluster version is less than minimumKubernetesVersion
	ValidateKubernetesVersion() error

	// NewClient returns a new controller runtime Client object for working on the management cluster
	NewClient() (client.Client, error)

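The interface change above only declares the contract; callers are expected to run the validation before touching the management cluster (the inventory client below does exactly that). A minimal sketch of the calling pattern, using a hypothetical helper name not present in the PR:

// guardManagementCluster is a hypothetical helper (not part of this PR) showing
// the intended calling pattern: validate the server version first, then hand
// out a controller-runtime client for the management cluster.
func guardManagementCluster(p Proxy) (client.Client, error) {
	if err := p.ValidateKubernetesVersion(); err != nil {
		return nil, err
	}
	return p.NewClient()
}
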
4 changes: 4 additions & 0 deletions cmd/clusterctl/client/cluster/inventory.go
@@ -91,6 +91,10 @@ func newInventoryClient(proxy Proxy, pollImmediateWaiter PollImmediateWaiter) *i
func (p *inventoryClient) EnsureCustomResourceDefinitions() error {
	log := logf.Log

	if err := p.proxy.ValidateKubernetesVersion(); err != nil {
		return err
	}
	// Since this is the first connection of many clusterctl operations, we want to fail fast if there is no
	// connectivity to the cluster, so we try to get a client as the very first thing.
	// NB. NewClient has an internal retry loop that should mitigate temporary connection glitches; here we are
25 changes: 25 additions & 0 deletions cmd/clusterctl/client/cluster/proxy.go
@@ -23,6 +23,7 @@ import (
	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
@@ -64,6 +65,30 @@ func (k *proxy) CurrentNamespace() (string, error) {
	return "default", nil
}

func (k *proxy) ValidateKubernetesVersion() error {
	config, err := k.getConfig()
	if err != nil {
		return err
	}

	client := discovery.NewDiscoveryClientForConfigOrDie(config)
	serverVersion, err := client.ServerVersion()
	if err != nil {
		return errors.Wrap(err, "failed to retrieve server version")
	}

	compver, err := utilversion.MustParseGeneric(serverVersion.String()).Compare(minimumKubernetesVersion)
	if err != nil {
		return errors.Wrap(err, "failed to parse and compare server version")
	}

	if compver == -1 {
		return errors.Errorf("unsupported management cluster server version: %s - minimum required version is %s", serverVersion.String(), minimumKubernetesVersion)
	}

	return nil
}

func (k *proxy) NewClient() (client.Client, error) {
	config, err := k.getConfig()
	if err != nil {
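For reference, the comparison above relies on k8s.io/apimachinery/pkg/util/version: MustParseGeneric panics if its receiver string cannot be parsed, while Compare returns -1, 0, or 1 (and an error only if its string argument fails to parse). A small standalone sketch of that behavior, not part of the PR and using made-up sample version strings:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

const minimumKubernetesVersion = "v1.16.0"

func main() {
	// Sample server versions are illustrative only.
	for _, server := range []string{"v1.15.11", "v1.16.0", "v1.18.2"} {
		cmp, err := utilversion.MustParseGeneric(server).Compare(minimumKubernetesVersion)
		if err != nil {
			fmt.Println("failed to parse minimum version:", err)
			continue
		}
		// cmp == -1 is the only case ValidateKubernetesVersion rejects.
		fmt.Printf("%s vs %s -> %d (supported: %v)\n", server, minimumKubernetesVersion, cmp, cmp >= 0)
	}
}
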
4 changes: 4 additions & 0 deletions cmd/clusterctl/internal/test/fake_proxy.go
@@ -57,6 +57,10 @@ func (f *FakeProxy) CurrentNamespace() (string, error) {
	return "default", nil
}

func (f *FakeProxy) ValidateKubernetesVersion() error {
	return nil
}

func (f *FakeProxy) NewClient() (client.Client, error) {
	if f.cs != nil {
		return f.cs, nil
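Since FakeProxy is the test double for the Proxy interface, adding the no-op method above is what keeps the fake compiling against the new contract. A hedged sketch of a compile-time assertion that could live in a _test.go file under cmd/clusterctl/client/cluster (import paths assumed from the file locations in this diff):

package cluster_test

import (
	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test"
)

// This fails to compile whenever FakeProxy falls behind the Proxy interface,
// e.g. when a method such as ValidateKubernetesVersion is added.
var _ cluster.Proxy = &test.FakeProxy{}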