From bebc07719909a5a1916ea4124d5d2c09eeb96f08 Mon Sep 17 00:00:00 2001 From: rambohe Date: Fri, 31 Dec 2021 10:57:29 +0800 Subject: [PATCH] remove k8s.io/kubernetes dependency from yurtctl join/reset (#697) --- Makefile | 2 +- go.mod | 3 +- go.sum | 3 - pkg/node-servant/components/yurthub.go | 20 +- pkg/util/kubeadmapi/kubeadmapi.go | 423 ---------- pkg/yurtctl/cmd/convert/convert.go | 2 +- pkg/yurtctl/cmd/join/join.go | 399 ++++------ pkg/yurtctl/cmd/join/joindata/data.go | 44 ++ .../cmd/join/phases/join-cloud-node.go | 225 ------ pkg/yurtctl/cmd/join/phases/join-edge-node.go | 238 ------ pkg/yurtctl/cmd/join/phases/joinnode.go | 190 +++++ pkg/yurtctl/cmd/join/phases/postcheck.go | 83 +- pkg/yurtctl/cmd/join/phases/preflight.go | 60 ++ pkg/yurtctl/cmd/join/phases/prepare.go | 32 +- pkg/yurtctl/cmd/reset/phases/cleanupnode.go | 154 ++++ .../phases/{cleanfile.go => cleanyurtfile.go} | 8 +- .../constants.go => reset/phases/data.go} | 35 +- pkg/yurtctl/cmd/reset/phases/preflight.go | 68 ++ pkg/yurtctl/cmd/reset/phases/unmount.go | 29 + pkg/yurtctl/cmd/reset/phases/unmount_linux.go | 46 ++ pkg/yurtctl/cmd/reset/reset.go | 142 +--- pkg/yurtctl/cmd/yurtinit/phases/prepare.go | 3 +- pkg/yurtctl/constants/constants.go | 57 +- .../app/apis/kubeadm/bootstraptokenhelpers.go | 158 ++++ .../kubeadm/bootstraptokenhelpers_test.go | 456 +++++++++++ .../app/apis/kubeadm/bootstraptokenstring.go | 91 +++ .../kubeadm/app/apis/kubeadm/types.go | 48 ++ .../kubeadm/app/cmd/options/constant.go | 77 ++ .../kubeadm/app/cmd/phases/workflow/phase.go | 86 ++ .../kubeadm/app/cmd/phases/workflow/runner.go | 485 ++++++++++++ .../app/cmd/phases/workflow/runner_test.go | 625 +++++++++++++++ .../kubeadm/app/constants/constants.go | 151 ++++ .../kubeadm/app/constants/constants_unix.go} | 17 +- .../app/constants/constants_windows.go | 24 + .../kubeadm/app/discovery/token/token.go | 242 ++++++ .../bootstraptoken/clusterinfo/clusterinfo.go | 117 +++ .../app/phases/bootstraptoken/node/token.go | 62 ++ .../kubeadm/app/phases/kubelet/flags.go | 116 +++ .../kubeadm/app/phases/kubelet/flags_test.go | 92 +++ .../kubeadm/app/phases/kubelet/flags_unix.go | 59 ++ .../app/phases/kubelet/flags_windows.go | 25 + .../kubeadm/app/phases/kubelet/kubelet.go | 81 ++ .../kubeadm/app/preflight/checks.go | 736 ++++++++++++++++++ .../kubeadm/app/preflight/checks_unix.go | 34 + .../kubeadm/app/preflight/checks_windows.go | 50 ++ .../kubernetes/kubeadm/app/preflight/utils.go | 44 ++ .../kubeadm/app/preflight/utils_test.go | 64 ++ .../kubeadm/app/util/apiclient/idempotency.go | 356 +++++++++ .../app/util/apiclient/idempotency_test.go | 182 +++++ .../app/util/apiclient/tryidempotency.go | 232 ++++++ .../kubeadm/app/util/apiclient/wait.go | 271 +++++++ .../kubernetes/kubeadm/app/util/arguments.go | 113 +++ .../kubeadm/app/util/cgroupdriver.go | 53 ++ .../kubeadm/app/util/initsystem/initsystem.go | 41 + .../app/util/initsystem/initsystem_unix.go | 164 ++++ .../app/util/initsystem/initsystem_windows.go | 245 ++++++ .../kubeadm/app/util/kubeconfig/kubeconfig.go | 204 +++++ .../app/util/kubeconfig/kubeconfig_test.go | 330 ++++++++ .../kubeadm/app/util/pubkeypin/pubkeypin.go | 115 +++ .../app/util/pubkeypin/pubkeypin_test.go | 157 ++++ .../kubeadm/app/util/runtime/runtime.go | 235 ++++++ .../kubeadm/app/util/runtime/runtime_test.go | 463 +++++++++++ .../kubeadm/app/util/runtime/runtime_unix.go | 38 + .../app/util/runtime/runtime_windows.go | 38 + .../kubelet/apis/config/register.go | 44 ++ .../kubelet/apis/config/scheme/scheme.go | 43 + 
.../kubernetes/kubelet/apis/config/types.go | 420 ++++++++++ .../kubelet/apis/config/v1beta1/defaults.go | 232 ++++++ .../apis/config/v1beta1/defaults_linux.go | 27 + .../apis/config/v1beta1/defaults_others.go | 26 + .../kubelet/apis/config/v1beta1/register.go | 43 + .../config/v1beta1/zz_generated.conversion.go | 564 ++++++++++++++ .../config/v1beta1/zz_generated.deepcopy.go | 21 + .../config/v1beta1/zz_generated.defaults.go | 38 + .../apis/config/zz_generated.deepcopy.go | 284 +++++++ .../kubelet/kubeletconfig/util/codec/codec.go | 106 +++ pkg/yurtctl/util/edgenode/common.go | 11 +- pkg/yurtctl/util/kubernetes/util.go | 123 ++- 78 files changed, 10039 insertions(+), 1386 deletions(-) delete mode 100644 pkg/util/kubeadmapi/kubeadmapi.go create mode 100644 pkg/yurtctl/cmd/join/joindata/data.go delete mode 100644 pkg/yurtctl/cmd/join/phases/join-cloud-node.go delete mode 100644 pkg/yurtctl/cmd/join/phases/join-edge-node.go create mode 100644 pkg/yurtctl/cmd/join/phases/joinnode.go create mode 100644 pkg/yurtctl/cmd/join/phases/preflight.go create mode 100644 pkg/yurtctl/cmd/reset/phases/cleanupnode.go rename pkg/yurtctl/cmd/reset/phases/{cleanfile.go => cleanyurtfile.go} (85%) rename pkg/yurtctl/cmd/{join/phases/constants.go => reset/phases/data.go} (57%) create mode 100644 pkg/yurtctl/cmd/reset/phases/preflight.go create mode 100644 pkg/yurtctl/cmd/reset/phases/unmount.go create mode 100644 pkg/yurtctl/cmd/reset/phases/unmount_linux.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenstring.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/types.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/cmd/options/constant.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/phase.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/constants/constants.go rename pkg/yurtctl/{cmd/join/phases/type.go => kubernetes/kubeadm/app/constants/constants_unix.go} (70%) create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/discovery/token/token.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/node/token.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_windows.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/kubelet.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils_test.go create mode 100644 
pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/tryidempotency.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/wait.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/arguments.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/cgroupdriver.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_test.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go create mode 100644 pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/register.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/scheme/scheme.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/types.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/register.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go create mode 100644 pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go diff --git a/Makefile b/Makefile index 6a87614ac4f..bf0edbb52ee 100644 --- a/Makefile +++ b/Makefile @@ -90,4 +90,4 @@ GOLINT_BIN=$(shell which golangci-lint) endif lint: install-golint ## Run go lint against code. 
- $(GOLINT_BIN) run -v + $(GOLINT_BIN) run -v \ No newline at end of file diff --git a/go.mod b/go.mod index 6491764e5e7..4a2efd7a346 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/openyurtio/openyurt go 1.13 require ( + github.com/Microsoft/go-winio v0.4.14 github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/aliyun/alibaba-cloud-sdk-go v1.61.355 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd @@ -15,7 +16,6 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/onsi/ginkgo v1.13.0 github.com/onsi/gomega v1.10.1 - github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 github.com/openyurtio/yurt-app-manager-api v0.18.8 github.com/pkg/errors v0.8.1 @@ -42,6 +42,7 @@ require ( k8s.io/kubectl v0.0.0 k8s.io/kubelet v0.0.0 k8s.io/kubernetes v1.18.8 + k8s.io/system-validators v1.0.4 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a sigs.k8s.io/apiserver-network-proxy v0.0.15 ) diff --git a/go.sum b/go.sum index 0c379a2495e..f493c7e3f91 100644 --- a/go.sum +++ b/go.sum @@ -114,7 +114,6 @@ github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v17.12.0-ce-rc1.0.20200531234253-77e06fda0c94+incompatible h1:PmGHHCZ43l6h8aZIi+Xa+z1SWe4dFImd5EK3TNp1jlo= @@ -474,8 +473,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= diff --git a/pkg/node-servant/components/yurthub.go b/pkg/node-servant/components/yurthub.go index f56f86d8598..c8ce50972f6 100644 --- a/pkg/node-servant/components/yurthub.go +++ b/pkg/node-servant/components/yurthub.go @@ -30,6 +30,7 @@ import ( "k8s.io/klog/v2" enutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode" + "github.com/openyurtio/openyurt/pkg/yurtctl/util/templates" "github.com/openyurtio/openyurt/pkg/yurthub/certificate/hubself" "github.com/openyurtio/openyurt/pkg/yurthub/storage/disk" "github.com/openyurtio/openyurt/pkg/yurthub/util" @@ -68,21 +69,22 @@ func (op *yurtHubOperator) Install() error { // 1-1. 
replace variables in yaml file klog.Infof("setting up yurthub apiServer addr") - yurthubTemplate := enutil.ReplaceRegularExpression(enutil.YurthubTemplate, - map[string]string{ - "__kubernetes_service_addr__": op.apiServerAddr, - "__yurthub_image__": op.yurthubImage, - "__join_token__": op.joinToken, - "__working_mode__": string(op.workingMode), - }) + yurthubTemplate, err := templates.SubsituteTemplate(enutil.YurthubTemplate, map[string]string{ + "kubernetesServerAddr": op.apiServerAddr, + "image": op.yurthubImage, + "joinToken": op.joinToken, + "workingMode": string(op.workingMode), + }) + if err != nil { + return err + } // 1-2. create yurthub.yaml podManifestPath := enutil.GetPodManifestPath() if err := enutil.EnsureDir(podManifestPath); err != nil { return err } - err := ioutil.WriteFile(getYurthubYaml(podManifestPath), []byte(yurthubTemplate), fileMode) - if err != nil { + if err := ioutil.WriteFile(getYurthubYaml(podManifestPath), []byte(yurthubTemplate), fileMode); err != nil { return err } klog.Infof("create the %s/yurt-hub.yaml", podManifestPath) diff --git a/pkg/util/kubeadmapi/kubeadmapi.go b/pkg/util/kubeadmapi/kubeadmapi.go deleted file mode 100644 index 23fffe7d954..00000000000 --- a/pkg/util/kubeadmapi/kubeadmapi.go +++ /dev/null @@ -1,423 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeadmapi - -import ( - "context" - "sort" - "strings" - "time" - - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/authentication/user" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - bootstrapapi "k8s.io/cluster-bootstrap/token/api" - bootstraputil "k8s.io/cluster-bootstrap/token/util" - bootstrapsecretutil "k8s.io/cluster-bootstrap/util/secrets" - "k8s.io/klog/v2" -) - -const ( - BootstrapSignerClusterRoleName = "kubeadm:bootstrap-signer-clusterinfo" - // NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in - NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token" -) - -var ( - // DefaultTokenUsages specifies the default functions a token will get - DefaultTokenUsages = bootstrapapi.KnownTokenUsages - // DefaultTokenGroups specifies the default groups that this token will authenticate as when used for authentication - DefaultTokenGroups = []string{NodeBootstrapTokenAuthGroup} -) - -// BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used -// for both validation of the practically of the API server from a joining node's point -// of view and as an authentication method for the node in the bootstrap phase of -// "kubeadm join". 
This token is and should be short-lived -type BootstrapTokenString struct { - ID string - Secret string -} - -// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster -// TODO: The BootstrapToken object should move out to either k8s.io/client-go or k8s.io/api in the future -// (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. -type BootstrapToken struct { - // Token is used for establishing bidirectional trust between nodes and control-planes. - // Used for joining nodes in the cluster. - Token *BootstrapTokenString - // Description sets a human-friendly message why this token exists and what it's used - // for, so other administrators can know its purpose. - Description string - // TTL defines the time to live for this token. Defaults to 24h. - // Expires and TTL are mutually exclusive. - TTL *metav1.Duration - // Expires specifies the timestamp when this token expires. Defaults to being set - // dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. - Expires *metav1.Time - // Usages describes the ways in which this token can be used. Can by default be used - // for establishing bidirectional trust, but that can be changed here. - Usages []string - // Groups specifies the extra groups that this token will authenticate as when/if - // used for authentication - Groups []string -} - -// String returns the string representation of the BootstrapTokenString -func (bts BootstrapTokenString) String() string { - if len(bts.ID) > 0 && len(bts.Secret) > 0 { - return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) - } - return "" -} - -// ToSecret converts the given BootstrapToken object to its Secret representation that -// may be submitted to the API Server in order to be stored. -func (bt *BootstrapToken) ToSecret() *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: bootstraputil.BootstrapTokenSecretName(bt.Token.ID), - Namespace: metav1.NamespaceSystem, - }, - Type: v1.SecretType(bootstrapapi.SecretTypeBootstrapToken), - Data: encodeTokenSecretData(bt, time.Now()), - } -} - -// encodeTokenSecretData takes the token discovery object and an optional duration and returns the .Data for the Secret -// now is passed in order to be able to used in unit testing -func encodeTokenSecretData(token *BootstrapToken, now time.Time) map[string][]byte { - data := map[string][]byte{ - bootstrapapi.BootstrapTokenIDKey: []byte(token.Token.ID), - bootstrapapi.BootstrapTokenSecretKey: []byte(token.Token.Secret), - } - - if len(token.Description) > 0 { - data[bootstrapapi.BootstrapTokenDescriptionKey] = []byte(token.Description) - } - - // If for some strange reason both token.TTL and token.Expires would be set - // (they are mutually exclusive in validation so this shouldn't be the case), - // token.Expires has higher priority, as can be seen in the logic here. - if token.Expires != nil { - // Format the expiration date accordingly - // TODO: This maybe should be a helper function in bootstraputil? 
- expirationString := token.Expires.Time.Format(time.RFC3339) - data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expirationString) - - } else if token.TTL != nil && token.TTL.Duration > 0 { - // Only if .Expires is unset, TTL might have an effect - // Get the current time, add the specified duration, and format it accordingly - expirationString := now.Add(token.TTL.Duration).Format(time.RFC3339) - data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expirationString) - } - - for _, usage := range token.Usages { - data[bootstrapapi.BootstrapTokenUsagePrefix+usage] = []byte("true") - } - - if len(token.Groups) > 0 { - data[bootstrapapi.BootstrapTokenExtraGroupsKey] = []byte(strings.Join(token.Groups, ",")) - } - return data -} - -// BootstrapTokenFromSecret returns a BootstrapToken object from the given Secret -func BootstrapTokenFromSecret(secret *v1.Secret) (*BootstrapToken, error) { - // Get the Token ID field from the Secret data - tokenID := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenIDKey) - if len(tokenID) == 0 { - return nil, errors.Errorf("bootstrap Token Secret has no token-id data: %s", secret.Name) - } - - // Enforce the right naming convention - if secret.Name != bootstraputil.BootstrapTokenSecretName(tokenID) { - return nil, errors.Errorf("bootstrap token name is not of the form '%s(token-id)'. Actual: %q. Expected: %q", - bootstrapapi.BootstrapTokenSecretPrefix, secret.Name, bootstraputil.BootstrapTokenSecretName(tokenID)) - } - - tokenSecret := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenSecretKey) - if len(tokenSecret) == 0 { - return nil, errors.Errorf("bootstrap Token Secret has no token-secret data: %s", secret.Name) - } - - // Create the BootstrapTokenString object based on the ID and Secret - bts, err := NewBootstrapTokenStringFromIDAndSecret(tokenID, tokenSecret) - if err != nil { - return nil, errors.Wrap(err, "bootstrap Token Secret is invalid and couldn't be parsed") - } - - // Get the description (if any) from the Secret - description := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenDescriptionKey) - - // Expiration time is optional, if not specified this implies the token - // never expires. 
- secretExpiration := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenExpirationKey) - var expires *metav1.Time - if len(secretExpiration) > 0 { - expTime, err := time.Parse(time.RFC3339, secretExpiration) - if err != nil { - return nil, errors.Wrapf(err, "can't parse expiration time of bootstrap token %q", secret.Name) - } - expires = &metav1.Time{Time: expTime} - } - - // Build an usages string slice from the Secret data - var usages []string - for k, v := range secret.Data { - // Skip all fields that don't include this prefix - if !strings.HasPrefix(k, bootstrapapi.BootstrapTokenUsagePrefix) { - continue - } - // Skip those that don't have this usage set to true - if string(v) != "true" { - continue - } - usages = append(usages, strings.TrimPrefix(k, bootstrapapi.BootstrapTokenUsagePrefix)) - } - // Only sort the slice if defined - if usages != nil { - sort.Strings(usages) - } - - // Get the extra groups information from the Secret - // It's done this way to make .Groups be nil in case there is no items, rather than an - // empty slice or an empty slice with a "" string only - var groups []string - groupsString := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenExtraGroupsKey) - g := strings.Split(groupsString, ",") - if len(g) > 0 && len(g[0]) > 0 { - groups = g - } - - return &BootstrapToken{ - Token: bts, - Description: description, - Expires: expires, - Usages: usages, - Groups: groups, - }, nil -} - -// NewBootstrapTokenString converts the given Bootstrap Token as a string -// to the BootstrapTokenString object used for serialization/deserialization -// and internal usage. It also automatically validates that the given token -// is of the right format -func NewBootstrapTokenString(token string) (*BootstrapTokenString, error) { - substrs := bootstraputil.BootstrapTokenRegexp.FindStringSubmatch(token) - // TODO: Add a constant for the 3 value here, and explain better why it's needed (other than because how the regexp parsin works) - if len(substrs) != 3 { - return nil, errors.Errorf("the bootstrap token %q was not of the form %q", token, bootstrapapi.BootstrapTokenPattern) - } - - return &BootstrapTokenString{ID: substrs[1], Secret: substrs[2]}, nil -} - -// NewBootstrapTokenStringFromIDAndSecret is a wrapper around NewBootstrapTokenString -// that allows the caller to specify the ID and Secret separately -func NewBootstrapTokenStringFromIDAndSecret(id, secret string) (*BootstrapTokenString, error) { - return NewBootstrapTokenString(bootstraputil.TokenFromIDAndSecret(id, secret)) -} - -// CreateNewTokens tries to create a token and fails if one with the same ID already exists -func CreateNewTokens(client clientset.Interface, tokens []BootstrapToken) error { - return UpdateOrCreateTokens(client, true, tokens) -} - -// UpdateOrCreateTokens attempts to update a token with the given ID, or create if it does not already exist. 
-func UpdateOrCreateTokens(client clientset.Interface, failIfExists bool, tokens []BootstrapToken) error { - - for _, token := range tokens { - - secretName := bootstraputil.BootstrapTokenSecretName(token.Token.ID) - secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretName, metav1.GetOptions{}) - if secret != nil && err == nil && failIfExists { - return errors.Errorf("a token with id %q already exists", token.Token.ID) - } - - updatedOrNewSecret := token.ToSecret() - // Try to create or update the token with an exponential backoff - err = TryRunCommand(func() error { - if err := CreateOrUpdateSecret(client, updatedOrNewSecret); err != nil { - return errors.Wrapf(err, "failed to create or update bootstrap token with name %s", secretName) - } - return nil - }, 5) - if err != nil { - return err - } - } - return nil -} - -// TryRunCommand runs a function a maximum of failureThreshold times, and retries on error. If failureThreshold is hit; the last error is returned -func TryRunCommand(f func() error, failureThreshold int) error { - backoff := wait.Backoff{ - Duration: 5 * time.Second, - Factor: 2, // double the timeout for every failure - Steps: failureThreshold, - } - return wait.ExponentialBackoff(backoff, func() (bool, error) { - err := f() - if err != nil { - // Retry until the timeout - return false, nil - } - // The last f() call was a success, return cleanly - return true, nil - }) -} - -// CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. -func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { - if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { - if !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "unable to create secret") - } - - if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { - return errors.Wrap(err, "unable to update secret") - } - } - return nil -} - -// CreateOrUpdateConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. -func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error { - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { - if !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "unable to create ConfigMap") - } - - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil { - return errors.Wrap(err, "unable to update ConfigMap") - } - } - return nil -} - -// CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
-func CreateOrUpdateRole(client clientset.Interface, role *rbacv1.Role) error { - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil { - if !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "unable to create RBAC role") - } - - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{}); err != nil { - return errors.Wrap(err, "unable to update RBAC role") - } - } - return nil -} - -// CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. -func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbacv1.RoleBinding) error { - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}); err != nil { - if !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "unable to create RBAC rolebinding") - } - - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}); err != nil { - return errors.Wrap(err, "unable to update RBAC rolebinding") - } - } - return nil -} - -// CreateBootstrapConfigMapIfNotExists creates the kube-public ConfigMap if it doesn't exist already -func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string) error { - - klog.V(1).Infof("[bootstrap-token] Creating the %q ConfigMap in the %q namespace", bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic) - - klog.V(1).Infoln("[bootstrap-token] loading admin kubeconfig") - adminConfig, err := clientcmd.LoadFromFile(file) - if err != nil { - return errors.Wrap(err, "failed to load admin kubeconfig") - } - - adminCluster := adminConfig.Contexts[adminConfig.CurrentContext].Cluster - // Copy the cluster from admin.conf to the bootstrap kubeconfig, contains the CA cert and the server URL - klog.V(1).Infoln("[bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig") - bootstrapConfig := &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - "": adminConfig.Clusters[adminCluster], - }, - } - bootstrapBytes, err := clientcmd.Write(*bootstrapConfig) - if err != nil { - return err - } - - // Create or update the ConfigMap in the kube-public namespace - klog.V(1).Infoln("[bootstrap-token] creating/updating ConfigMap in kube-public namespace") - return CreateOrUpdateConfigMap(client, &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: bootstrapapi.ConfigMapClusterInfo, - Namespace: metav1.NamespacePublic, - }, - Data: map[string]string{ - bootstrapapi.KubeConfigKey: string(bootstrapBytes), - }, - }) -} - -// CreateClusterInfoRBACRules creates the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace to unauthenticated users -func CreateClusterInfoRBACRules(client clientset.Interface) error { - klog.V(1).Infoln("creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace") - err := CreateOrUpdateRole(client, &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: BootstrapSignerClusterRoleName, - Namespace: metav1.NamespacePublic, - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{""}, - Resources: []string{"configmaps"}, - ResourceNames: []string{bootstrapapi.ConfigMapClusterInfo}, - }, - }, - }) - if err != nil { - return err - } - - return 
CreateOrUpdateRoleBinding(client, &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: BootstrapSignerClusterRoleName, - Namespace: metav1.NamespacePublic, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "Role", - Name: BootstrapSignerClusterRoleName, - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.UserKind, - Name: user.Anonymous, - }, - }, - }) -} diff --git a/pkg/yurtctl/cmd/convert/convert.go b/pkg/yurtctl/cmd/convert/convert.go index 3ce819fab80..75ff4133766 100644 --- a/pkg/yurtctl/cmd/convert/convert.go +++ b/pkg/yurtctl/cmd/convert/convert.go @@ -36,7 +36,7 @@ import ( nodeservant "github.com/openyurtio/openyurt/pkg/node-servant" "github.com/openyurtio/openyurt/pkg/preflight" - "github.com/openyurtio/openyurt/pkg/util/kubeadmapi" + kubeadmapi "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo" "github.com/openyurtio/openyurt/pkg/yurtctl/lock" kubeutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/kubernetes" strutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/strings" diff --git a/pkg/yurtctl/cmd/join/join.go b/pkg/yurtctl/cmd/join/join.go index 4cd54e60f6d..2fff6b794ad 100644 --- a/pkg/yurtctl/cmd/join/join.go +++ b/pkg/yurtctl/cmd/join/join.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "os" + "strings" "github.com/lithammer/dedent" "github.com/pkg/errors" @@ -28,23 +29,18 @@ import ( flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/klog/v2" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" - kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" - kubeadmPhase "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/join" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/discovery" - configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" yurtphase "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/phases" + yurtconstants "github.com/openyurtio/openyurt/pkg/yurtctl/constants" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/discovery/token" + kubeconfigutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig" + yurtctlutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/kubernetes" ) var ( @@ -59,36 +55,33 @@ var ( ) type joinOptions struct { - cfgPath string - token string - controlPlane bool - ignorePreflightErrors []string - externalcfg *kubeadmapiv1beta2.JoinConfiguration - kustomizeDir string - nodeType string - yurthubImage string + token string + nodeType string + nodeName string + criSocket string + organizations string + pauseImage string + yurthubImage string + caCertHashes []string + unsafeSkipCAVerification bool + 
ignorePreflightErrors []string + nodeLabels string } // newJoinOptions returns a struct ready for being used for creating cmd join flags. func newJoinOptions() *joinOptions { - // initialize the public kubeadm config API by applying defaults - externalcfg := &kubeadmapiv1beta2.JoinConfiguration{} - - // Add optional config objects to host flags. - // un-set objects will be cleaned up afterwards (into newJoinData func) - externalcfg.Discovery.File = &kubeadmapiv1beta2.FileDiscovery{} - externalcfg.Discovery.BootstrapToken = &kubeadmapiv1beta2.BootstrapTokenDiscovery{} - externalcfg.ControlPlane = &kubeadmapiv1beta2.JoinControlPlane{} - - // Apply defaults - kubeadmscheme.Scheme.Default(externalcfg) - return &joinOptions{ - externalcfg: externalcfg, + nodeType: yurtconstants.EdgeNode, + criSocket: constants.DefaultDockerCRISocket, + pauseImage: yurtconstants.PauseImagePath, + yurthubImage: fmt.Sprintf("%s/%s:%s", yurtconstants.DefaultOpenYurtImageRegistry, yurtconstants.Yurthub, yurtconstants.DefaultOpenYurtVersion), + caCertHashes: make([]string, 0), + unsafeSkipCAVerification: false, + ignorePreflightErrors: make([]string, 0), } } -// NewJoinCmd returns "yurtctl join" command. +// NewCmdJoin returns "yurtctl join" command. func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { if joinOptions == nil { joinOptions = newJoinOptions() @@ -99,15 +92,10 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { Use: "join [api-server-endpoint]", Short: "Run this on any machine you wish to join an existing cluster", RunE: func(cmd *cobra.Command, args []string) error { - c, err := joinRunner.InitData(args) - if err != nil { - return err - } - data := c.(*joinData) if err := joinRunner.Run(args); err != nil { return err } - fmt.Fprint(data.outputWriter, joinWorkerNodeDoneMsg) + fmt.Fprint(out, joinWorkerNodeDoneMsg) return nil }, } @@ -115,11 +103,9 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { addJoinConfigFlags(cmd.Flags(), joinOptions) joinRunner.AppendPhase(yurtphase.NewPreparePhase()) - joinRunner.AppendPhase(kubeadmPhase.NewPreflightPhase()) + joinRunner.AppendPhase(yurtphase.NewPreflightPhase()) joinRunner.AppendPhase(yurtphase.NewEdgeNodePhase()) - joinRunner.AppendPhase(yurtphase.NewCloudNodePhase()) joinRunner.AppendPhase(yurtphase.NewPostcheckPhase()) - joinRunner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) { return newJoinData(cmd, args, joinOptions, out) }) @@ -130,15 +116,39 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { // addJoinConfigFlags adds join flags bound to the config to the specified flagset func addJoinConfigFlags(flagSet *flag.FlagSet, joinOptions *joinOptions) { flagSet.StringVar( - &joinOptions.externalcfg.NodeRegistration.Name, options.NodeName, joinOptions.externalcfg.NodeRegistration.Name, - `Specify the node name.`, + &joinOptions.token, options.TokenStr, "", + "Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.", + ) + flagSet.StringVar( + &joinOptions.nodeType, options.NodeType, joinOptions.nodeType, + "Sets the node is edge or cloud", + ) + flagSet.StringVar( + &joinOptions.nodeName, options.NodeName, joinOptions.nodeName, + `Specify the node name. 
if not specified, hostname will be used.`, + ) + flagSet.StringVar( + &joinOptions.criSocket, options.NodeCRISocket, joinOptions.criSocket, + "Path to the CRI socket to connect", + ) + flagSet.StringVar( + &joinOptions.organizations, options.Organizations, joinOptions.organizations, + "Organizations that will be added into hub's client certificate", + ) + flagSet.StringVar( + &joinOptions.pauseImage, options.PauseImage, joinOptions.pauseImage, + "Sets the image version of pause container", + ) + flagSet.StringVar( + &joinOptions.yurthubImage, options.YurtHubImage, joinOptions.yurthubImage, + "Sets the image version of yurthub component", ) flagSet.StringSliceVar( - &joinOptions.externalcfg.Discovery.BootstrapToken.CACertHashes, options.TokenDiscoveryCAHash, []string{}, + &joinOptions.caCertHashes, options.TokenDiscoveryCAHash, joinOptions.caCertHashes, "For token-based discovery, validate that the root CA public key matches this hash (format: \":\").", ) flagSet.BoolVar( - &joinOptions.externalcfg.Discovery.BootstrapToken.UnsafeSkipCAVerification, options.TokenDiscoverySkipCAHash, false, + &joinOptions.unsafeSkipCAVerification, options.TokenDiscoverySkipCAHash, false, "For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.", ) flagSet.StringSliceVar( @@ -146,234 +156,167 @@ func addJoinConfigFlags(flagSet *flag.FlagSet, joinOptions *joinOptions) { "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.", ) flagSet.StringVar( - &joinOptions.token, options.TokenStr, "", - "Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.", - ) - flagSet.StringVar( - &joinOptions.nodeType, "node-type", "", - "Sets the node is edge-node or cloud-node", - ) - flagSet.StringVar( - &joinOptions.yurthubImage, "yurthub-image", "", - "Sets the image version of yurthub component", + &joinOptions.nodeLabels, options.NodeLabels, joinOptions.nodeLabels, + "Sets the labels for joining node", ) - cmdutil.AddCRISocketFlag(flagSet, &joinOptions.externalcfg.NodeRegistration.CRISocket) } type joinData struct { - cfg *kubeadmapi.JoinConfiguration - initCfg *kubeadmapi.InitConfiguration + joinNodeData *joindata.NodeRegistration + apiServerEndpoint string + token string tlsBootstrapCfg *clientcmdapi.Config clientSet *clientset.Clientset ignorePreflightErrors sets.String - outputWriter io.Writer - kustomizeDir string - nodeType string + organizations string + pauseImage string yurthubImage string + kubernetesVersion string + caCertHashes sets.String + nodeLabels map[string]string } // newJoinData returns a new joinData struct to be used for the execution of the kubeadm join workflow. 
// This func takes care of validating joinOptions passed to the command, and then it converts -// options into the internal JoinConfiguration type that is used as input all the phases in the kubeadm join workflow +// options into the internal JoinData type that is used as input all the phases in the kubeadm join workflow func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Writer) (*joinData, error) { - // Re-apply defaults to the public kubeadm API (this will set only values not exposed/not set as a flags) - kubeadmscheme.Scheme.Default(opt.externalcfg) - - // Validate standalone flags values and/or combination of flags and then assigns - // validated values to the public kubeadm config API when applicable - - // if a token is provided, use this value for both discovery-token and tls-bootstrap-token when those values are not provided - if len(opt.token) > 0 { - if len(opt.externalcfg.Discovery.TLSBootstrapToken) == 0 { - opt.externalcfg.Discovery.TLSBootstrapToken = opt.token - } - if len(opt.externalcfg.Discovery.BootstrapToken.Token) == 0 { - opt.externalcfg.Discovery.BootstrapToken.Token = opt.token - } - } - - // if a file or URL from which to load cluster information was not provided, unset the Discovery.File object - if len(opt.externalcfg.Discovery.File.KubeConfigPath) == 0 { - opt.externalcfg.Discovery.File = nil - } - // if an APIServerEndpoint from which to retrieve cluster information was not provided, unset the Discovery.BootstrapToken object + var apiServerEndpoint string if len(args) == 0 { - opt.externalcfg.Discovery.BootstrapToken = nil + return nil, errors.New("apiServer endpoint is empty") } else { - if len(opt.cfgPath) == 0 && len(args) > 1 { + if len(args) > 1 { klog.Warningf("[preflight] WARNING: More than one API server endpoint supplied on command line %v. Using the first one.", args) } - opt.externalcfg.Discovery.BootstrapToken.APIServerEndpoint = args[0] + apiServerEndpoint = args[0] } - // if not joining a control plane, unset the ControlPlane object - if !opt.controlPlane { - if opt.externalcfg.ControlPlane != nil { - klog.Warningf("[preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when %s flag is not set.", options.ControlPlane) - } - opt.externalcfg.ControlPlane = nil + if len(opt.token) == 0 { + return nil, errors.New("join token is empty, so unable to bootstrap worker node.") } - // if the admin.conf file already exists, use it for skipping the discovery process. - // NB. this case can happen when we are joining a control-plane node only (and phases are invoked atomically) - var adminKubeConfigPath = kubeadmconstants.GetAdminKubeConfigPath() - var tlsBootstrapCfg *clientcmdapi.Config - if _, err := os.Stat(adminKubeConfigPath); err == nil && opt.controlPlane { - // use the admin.conf as tlsBootstrapCfg, that is the kubeconfig file used for reading the kubeadm-config during discovery - klog.V(1).Infof("[preflight] found %s. 
Use it for skipping discovery", adminKubeConfigPath) - tlsBootstrapCfg, err = clientcmd.LoadFromFile(adminKubeConfigPath) - if err != nil { - return nil, errors.Wrapf(err, "Error loading %s", adminKubeConfigPath) - } + if opt.nodeType != yurtconstants.EdgeNode && opt.nodeType != yurtconstants.CloudNode { + return nil, errors.Errorf("node type(%s) is invalid, only \"edge and cloud\" are supported", opt.nodeType) } - if err := validation.ValidateMixedArguments(cmd.Flags()); err != nil { - return nil, err + if opt.unsafeSkipCAVerification && len(opt.caCertHashes) != 0 { + return nil, errors.Errorf("when --discovery-token-ca-cert-hash is specified, --discovery-token-unsafe-skip-ca-verification should be false.") + } else if len(opt.caCertHashes) == 0 && !opt.unsafeSkipCAVerification { + return nil, errors.Errorf("when --discovery-token-ca-cert-hash is not specified, --discovery-token-unsafe-skip-ca-verification should be true") + } + + var ignoreErrors sets.String + for i := range opt.ignorePreflightErrors { + ignoreErrors.Insert(opt.ignorePreflightErrors[i]) } // Either use the config file if specified, or convert public kubeadm API to the internal JoinConfiguration // and validates JoinConfiguration - if opt.externalcfg.NodeRegistration.Name == "" { + name := opt.nodeName + if name == "" { klog.V(1).Infoln("[preflight] found NodeName empty; using OS hostname as NodeName") + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + name = hostname } - if opt.externalcfg.ControlPlane != nil && opt.externalcfg.ControlPlane.LocalAPIEndpoint.AdvertiseAddress == "" { - klog.V(1).Infoln("[preflight] found advertiseAddress empty; using default interface's IP address as advertiseAddress") + data := &joinData{ + apiServerEndpoint: apiServerEndpoint, + token: opt.token, + tlsBootstrapCfg: nil, + ignorePreflightErrors: ignoreErrors, + pauseImage: opt.pauseImage, + yurthubImage: opt.yurthubImage, + caCertHashes: sets.NewString(opt.caCertHashes...), + organizations: opt.organizations, + nodeLabels: make(map[string]string), + joinNodeData: &joindata.NodeRegistration{ + Name: name, + WorkingMode: opt.nodeType, + CRISocket: opt.criSocket, + Organizations: opt.organizations, + }, } - cfg, err := configutil.LoadOrDefaultJoinConfiguration(opt.cfgPath, opt.externalcfg) - if err != nil { - return nil, err + // parse node labels + if len(opt.nodeLabels) != 0 { + parts := strings.Split(opt.nodeLabels, ",") + for i := range parts { + kv := strings.Split(parts[i], "=") + if len(kv) != 2 { + klog.Warningf("node labels(%s) format is invalid, expect k1=v1,k2=v2", parts[i]) + continue + } + data.nodeLabels[kv[0]] = kv[1] + } } - ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(opt.ignorePreflightErrors, cfg.NodeRegistration.IgnorePreflightErrors) + // get tls bootstrap config + cfg, err := token.RetrieveBootstrapConfig(data) if err != nil { + klog.Errorf("failed to retrieve bootstrap config, %v", err) return nil, err } - // Also set the union of pre-flight errors to JoinConfiguration, to provide a consistent view of the runtime configuration: - cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List() + data.tlsBootstrapCfg = cfg - // override node name and CRI socket from the command line opt - if opt.externalcfg.NodeRegistration.Name != "" { - cfg.NodeRegistration.Name = opt.externalcfg.NodeRegistration.Name - } - if opt.externalcfg.NodeRegistration.CRISocket != "" { - cfg.NodeRegistration.CRISocket = opt.externalcfg.NodeRegistration.CRISocket + // get 
kubernetes version + client, err := kubeconfigutil.ToClientSet(cfg) + if err != nil { + klog.Errorf("failed to create bootstrap client, %v", err) + return nil, err } + data.clientSet = client - if cfg.ControlPlane != nil { - if err := configutil.VerifyAPIServerBindAddress(cfg.ControlPlane.LocalAPIEndpoint.AdvertiseAddress); err != nil { - return nil, err - } + k8sVersion, err := yurtctlutil.GetKubernetesVersionFromCluster(client) + if err != nil { + klog.Errorf("failed to get kubernetes version, %v", err) + return nil, err } + data.kubernetesVersion = k8sVersion + klog.Infof("node join data info: %#+v", *data) - return &joinData{ - cfg: cfg, - tlsBootstrapCfg: tlsBootstrapCfg, - ignorePreflightErrors: ignorePreflightErrorsSet, - outputWriter: out, - kustomizeDir: opt.kustomizeDir, - nodeType: opt.nodeType, - yurthubImage: opt.yurthubImage, - }, nil + return data, nil } -// CertificateKey returns the key used to encrypt the certs. -func (j *joinData) CertificateKey() string { - if j.cfg.ControlPlane != nil { - return j.cfg.ControlPlane.CertificateKey - } - return "" +// ServerAddr returns the public address of kube-apiserver. +func (j *joinData) ServerAddr() string { + return j.apiServerEndpoint } -// Cfg returns the JoinConfiguration. -func (j *joinData) Cfg() *kubeadmapi.JoinConfiguration { - return j.cfg +// JoinToken returns bootstrap token for joining node +func (j *joinData) JoinToken() string { + return j.token } -// TLSBootstrapCfg returns the cluster-info (kubeconfig). -func (j *joinData) TLSBootstrapCfg() (*clientcmdapi.Config, error) { - if j.tlsBootstrapCfg != nil { - return j.tlsBootstrapCfg, nil - } - klog.V(1).Infoln("[preflight] Discovering cluster-info") - tlsBootstrapCfg, err := discovery.For(j.cfg) - j.tlsBootstrapCfg = tlsBootstrapCfg - return tlsBootstrapCfg, err +// PauseImage returns the pause image. +func (j *joinData) PauseImage() string { + return j.pauseImage } -// InitCfg returns the InitConfiguration. -func (j *joinData) InitCfg() (*kubeadmapi.InitConfiguration, error) { - if j.initCfg != nil { - return j.initCfg, nil - } - if _, err := j.TLSBootstrapCfg(); err != nil { - return nil, err - } - for _, cluster := range j.tlsBootstrapCfg.Clusters { - cluster.Server = fmt.Sprintf("https://%s", j.cfg.Discovery.BootstrapToken.APIServerEndpoint) - } - klog.V(1).Infoln("[preflight] Fetching init configuration") - initCfg, err := fetchInitConfigurationFromJoinConfiguration(j.cfg, j.tlsBootstrapCfg) - j.initCfg = initCfg - return initCfg, err +// YurtHubImage returns the YurtHub image. 
+func (j *joinData) YurtHubImage() string { + return j.yurthubImage } -// fetchInitConfigurationFromJoinConfiguration retrieves the init configuration from a join configuration, performing the discovery -func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfiguration, tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) { - // Retrieves the kubeadm configuration - klog.V(1).Infoln("[preflight] Retrieving KubeConfig objects") - initConfiguration, err := fetchInitConfiguration(tlsBootstrapCfg) - if err != nil { - return nil, err - } - - // Create the final KubeConfig file with the cluster name discovered after fetching the cluster configuration - clusterinfo := kubeconfigutil.GetClusterFromKubeConfig(tlsBootstrapCfg) - tlsBootstrapCfg.Clusters = map[string]*clientcmdapi.Cluster{ - initConfiguration.ClusterName: clusterinfo, - } - tlsBootstrapCfg.Contexts[tlsBootstrapCfg.CurrentContext].Cluster = initConfiguration.ClusterName - - // injects into the kubeadm configuration the information about the joining node - initConfiguration.NodeRegistration = cfg.NodeRegistration - if cfg.ControlPlane != nil { - initConfiguration.LocalAPIEndpoint = cfg.ControlPlane.LocalAPIEndpoint - } - - return initConfiguration, nil +// KubernetesVersion returns the kubernetes version. +func (j *joinData) KubernetesVersion() string { + return j.kubernetesVersion } -// fetchInitConfiguration reads the cluster configuration from the kubeadm-admin configMap -func fetchInitConfiguration(tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) { - // creates a client to access the cluster using the bootstrap token identity - tlsClient, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg) - if err != nil { - return nil, errors.Wrap(err, "unable to access the cluster") - } - - // Fetches the init configuration - initConfiguration, err := configutil.FetchInitConfigurationFromCluster(tlsClient, os.Stdout, "preflight", true) - if err != nil { - return nil, errors.Wrap(err, "unable to fetch the kubeadm-config ConfigMap") - } +// TLSBootstrapCfg returns the cluster-info (kubeconfig). +func (j *joinData) TLSBootstrapCfg() *clientcmdapi.Config { + return j.tlsBootstrapCfg +} - return initConfiguration, nil +// BootstrapClient returns the kube clientset. +func (j *joinData) BootstrapClient() *clientset.Clientset { + return j.clientSet } -// ClientSet returns the ClientSet for accessing the cluster with the identity defined in admin.conf. -func (j *joinData) ClientSet() (*clientset.Clientset, error) { - if j.clientSet != nil { - return j.clientSet, nil - } - path := kubeadmconstants.GetAdminKubeConfigPath() - client, err := kubeconfigutil.ClientSetFromFile(path) - if err != nil { - return nil, err - } - j.clientSet = client - return client, nil +func (j *joinData) NodeRegistration() *joindata.NodeRegistration { + return j.joinNodeData } // IgnorePreflightErrors returns the list of preflight errors to ignore. @@ -381,22 +324,10 @@ func (j *joinData) IgnorePreflightErrors() sets.String { return j.ignorePreflightErrors } -// OutputWriter returns the io.Writer used to write messages such as the "join done" message. 
-func (j *joinData) OutputWriter() io.Writer { - return j.outputWriter +func (j *joinData) CaCertHashes() sets.String { + return j.caCertHashes } -// KustomizeDir returns the folder where kustomize patches for static pod manifest are stored -func (j *joinData) KustomizeDir() string { - return j.kustomizeDir -} - -//NodeType returns the node is cloud-node or edge-node. -func (j *joinData) NodeType() string { - return j.nodeType -} - -//YurtHubImage returns the YurtHub image. -func (j *joinData) YurtHubImage() string { - return j.yurthubImage +func (j *joinData) NodeLabels() map[string]string { + return j.nodeLabels } diff --git a/pkg/yurtctl/cmd/join/joindata/data.go b/pkg/yurtctl/cmd/join/joindata/data.go new file mode 100644 index 00000000000..74e08daa32f --- /dev/null +++ b/pkg/yurtctl/cmd/join/joindata/data.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package joindata + +import ( + "k8s.io/apimachinery/pkg/util/sets" + clientset "k8s.io/client-go/kubernetes" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type NodeRegistration struct { + Name string + CRISocket string + WorkingMode string + Organizations string +} + +type YurtJoinData interface { + ServerAddr() string + JoinToken() string + PauseImage() string + YurtHubImage() string + KubernetesVersion() string + TLSBootstrapCfg() *clientcmdapi.Config + BootstrapClient() *clientset.Clientset + NodeRegistration() *NodeRegistration + CaCertHashes() sets.String + NodeLabels() map[string]string + IgnorePreflightErrors() sets.String +} diff --git a/pkg/yurtctl/cmd/join/phases/join-cloud-node.go b/pkg/yurtctl/cmd/join/phases/join-cloud-node.go deleted file mode 100644 index 762fe9141fc..00000000000 --- a/pkg/yurtctl/cmd/join/phases/join-cloud-node.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2021 The OpenYurt Authors. -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package phases - -import ( - "context" - "fmt" - "os" - - "github.com/lithammer/dedent" - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/version" - "k8s.io/apimachinery/pkg/util/wait" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - certutil "k8s.io/client-go/util/cert" - "k8s.io/klog/v2" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet" - patchnodephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode" - "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - - "github.com/openyurtio/openyurt/pkg/yurtctl/constants" -) - -var ( - kubeadmJoinFailMsg = dedent.Dedent(` - Unfortunately, an error has occurred: - %v - - This error is likely caused by: - - The kubelet is not running - - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled) - - If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands: - - 'systemctl status kubelet' - - 'journalctl -xeu kubelet' - `) -) - -// NewCloudNodePhase creates a yurtctl workflow phase that start kubelet on a cloud node. -func NewCloudNodePhase() workflow.Phase { - return workflow.Phase{ - Name: "kubelet-start [api-server-endpoint]", - Short: "Write kubelet settings, certificates and (re)start the kubelet", - Long: "Write a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)start kubelet.", - Run: runKubeletStartJoinPhase, - InheritFlags: []string{ - options.CfgPath, - options.NodeCRISocket, - options.NodeName, - options.FileDiscovery, - options.TokenDiscovery, - options.TokenDiscoveryCAHash, - options.TokenDiscoverySkipCAHash, - options.TLSBootstrapToken, - options.TokenStr, - }, - } -} - -//getCloudNodeJoinData get node configuration for cloud-node. -func getCloudNodeJoinData(c workflow.RunData) (*kubeadmapi.JoinConfiguration, *kubeadmapi.InitConfiguration, *clientcmdapi.Config, error) { - data, ok := c.(YurtJoinData) - if !ok { - return nil, nil, nil, errors.New("kubelet-start phase invoked with an invalid data struct") - } - cfg := data.Cfg() - initCfg, err := data.InitCfg() - if err != nil { - return nil, nil, nil, err - } - tlsBootstrapCfg, err := data.TLSBootstrapCfg() - if err != nil { - return nil, nil, nil, err - } - return cfg, initCfg, tlsBootstrapCfg, nil -} - -// runKubeletStartJoinPhase executes the kubelet TLS bootstrap process. 
-// This process is executed by the kubelet and completes with the node joining the cluster -// with a dedicates set of credentials as required by the node authorizer -func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { - data, ok := c.(YurtJoinData) - if !ok { - return errors.New("kubelet-start phase invoked with an invalid data struct") - } - if data.NodeType() != constants.CloudNode { - return - } - cfg, initCfg, tlsBootstrapCfg, err := getCloudNodeJoinData(c) - if err != nil { - return err - } - bootstrapKubeConfigFile := kubeadmconstants.GetBootstrapKubeletKubeConfigPath() - - // Deletes the bootstrapKubeConfigFile, so the credential used for TLS bootstrap is removed from disk - defer os.Remove(bootstrapKubeConfigFile) - - // Write the bootstrap kubelet config file or the TLS-Bootstrapped kubelet config file down to disk - klog.V(1).Infof("[kubelet-start] writing bootstrap kubelet config file at %s", bootstrapKubeConfigFile) - if err := kubeconfigutil.WriteToDisk(bootstrapKubeConfigFile, tlsBootstrapCfg); err != nil { - return errors.Wrap(err, "couldn't save bootstrap-kubelet.conf to disk") - } - - // Write the ca certificate to disk so kubelet can use it for authentication - cluster := tlsBootstrapCfg.Contexts[tlsBootstrapCfg.CurrentContext].Cluster - if _, err := os.Stat(cfg.CACertPath); os.IsNotExist(err) { - klog.V(1).Infof("[kubelet-start] writing CA certificate at %s", cfg.CACertPath) - if err := certutil.WriteCert(cfg.CACertPath, tlsBootstrapCfg.Clusters[cluster].CertificateAuthorityData); err != nil { - return errors.Wrap(err, "couldn't save the CA certificate to disk") - } - } - - kubeletVersion, err := version.ParseSemantic(initCfg.ClusterConfiguration.KubernetesVersion) - if err != nil { - return err - } - - bootstrapClient, err := kubeconfigutil.ClientSetFromFile(bootstrapKubeConfigFile) - if err != nil { - return errors.Errorf("couldn't create client from kubeconfig file %q", bootstrapKubeConfigFile) - } - - // Obtain the name of this Node. - nodeName, _, err := kubeletphase.GetNodeNameAndHostname(&cfg.NodeRegistration) - if err != nil { - klog.Warning(err) - } - - // Make sure to exit before TLS bootstrap if a Node with the same name exist in the cluster - // and it has the "Ready" status. - // A new Node with the same name as an existing control-plane Node can cause undefined - // behavior and ultimately control-plane failure. - klog.V(1).Infof("[kubelet-start] Checking for an existing Node in the cluster with name %q and status %q", nodeName, v1.NodeReady) - node, err := bootstrapClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "cannot get Node %q", nodeName) - } - for _, cond := range node.Status.Conditions { - if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue { - return errors.Errorf("a Node with name %q and status %q already exists in the cluster. "+ - "You must delete the existing Node or change the name of this new joining Node", nodeName, v1.NodeReady) - } - } - - // Configure the kubelet. 
In this short timeframe, kubeadm is trying to stop/restart the kubelet - // Try to stop the kubelet service so no race conditions occur when configuring it - klog.V(1).Infoln("[kubelet-start] Stopping the kubelet") - kubeletphase.TryStopKubelet() - - // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start - if err := kubeletphase.DownloadConfig(bootstrapClient, kubeletVersion, kubeadmconstants.KubeletRunDirectory); err != nil { - return err - } - - // Write env file with flags for the kubelet to use. We only want to - // register the joining node with the specified taints if the node - // is not a control-plane. The mark-control-plane phase will register the taints otherwise. - registerTaintsUsingFlags := cfg.ControlPlane == nil - if err := kubeletphase.WriteKubeletDynamicEnvFile(&initCfg.ClusterConfiguration, &initCfg.NodeRegistration, registerTaintsUsingFlags, kubeadmconstants.KubeletRunDirectory); err != nil { - return err - } - - // Try to start the kubelet service in case it's inactive - fmt.Println("[kubelet-start] Starting the kubelet") - kubeletphase.TryStartKubelet() - - // Now the kubelet will perform the TLS Bootstrap, transforming /etc/kubernetes/bootstrap-kubelet.conf to /etc/kubernetes/kubelet.conf - // Wait for the kubelet to create the /etc/kubernetes/kubelet.conf kubeconfig file. If this process - // times out, display a somewhat user-friendly message. - waiter := apiclient.NewKubeWaiter(nil, kubeadmconstants.TLSBootstrapTimeout, os.Stdout) - if err := waiter.WaitForKubeletAndFunc(waitForTLSBootstrappedClient); err != nil { - fmt.Printf(kubeadmJoinFailMsg, err) - return err - } - - // When we know the /etc/kubernetes/kubelet.conf file is available, get the client - client, err := kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetKubeletKubeConfigPath()) - if err != nil { - return err - } - - klog.V(1).Infoln("[kubelet-start] preserving the crisocket information for the node") - if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil { - return errors.Wrap(err, "error uploading crisocket") - } - - return nil -} - -// waitForTLSBootstrappedClient waits for the /etc/kubernetes/kubelet.conf file to be available -func waitForTLSBootstrappedClient() error { - fmt.Println("[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...") - - // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. - return wait.PollImmediate(kubeadmconstants.TLSBootstrapRetryInterval, kubeadmconstants.TLSBootstrapTimeout, func() (bool, error) { - // Check that we can create a client set out of the kubelet kubeconfig. This ensures not - // only that the kubeconfig file exists, but that other files required by it also exist (like - // client certificate and key) - _, err := kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetKubeletKubeConfigPath()) - return (err == nil), nil - }) -} diff --git a/pkg/yurtctl/cmd/join/phases/join-edge-node.go b/pkg/yurtctl/cmd/join/phases/join-edge-node.go deleted file mode 100644 index 375aa329184..00000000000 --- a/pkg/yurtctl/cmd/join/phases/join-edge-node.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright 2021 The OpenYurt Authors. -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package phases - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/version" - clientset "k8s.io/client-go/kubernetes" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - certutil "k8s.io/client-go/util/cert" - "k8s.io/klog/v2" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet" - "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme" - utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec" - - "github.com/openyurtio/openyurt/pkg/yurtctl/constants" - "github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode" - "github.com/openyurtio/openyurt/pkg/yurthub/util" -) - -// NewEdgeNodePhase creates a yurtctl workflow phase that start kubelet on a edge node. -func NewEdgeNodePhase() workflow.Phase { - return workflow.Phase{ - Name: "Join edge-node to OpenYurt cluster. ", - Short: "Join edge-node", - Run: runJoinEdgeNode, - } -} - -//runJoinEdgeNode executes the edge node join process. -func runJoinEdgeNode(c workflow.RunData) error { - data, ok := c.(YurtJoinData) - if !ok { - return fmt.Errorf("Join edge-node phase invoked with an invalid data struct. ") - } - if data.NodeType() != constants.EdgeNode { - return nil - } - cfg, initCfg, tlsBootstrapCfg, err := getEdgeNodeJoinData(data) - if err != nil { - return err - } - - if err := setKubeletConfigForEdgeNode(); err != nil { - return err - } - clusterinfo := kubeconfigutil.GetClusterFromKubeConfig(tlsBootstrapCfg) - if err := certutil.WriteCert(edgenode.KubeCaFile, clusterinfo.CertificateAuthorityData); err != nil { - return err - } - - tlsClient, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg) - if err != nil { - return err - } - kc, err := getKubeletConfig(cfg, initCfg, tlsClient) - if err != nil { - return err - } - if err := addYurthubStaticYaml(cfg, kc.StaticPodPath, data.YurtHubImage()); err != nil { - return err - } - klog.Info("[kubelet-start] Starting the kubelet") - kubeletphase.TryStartKubelet() - return nil -} - -//setKubeleConfigForEdgeNode write kubelet.conf for edge-node. 
-func setKubeletConfigForEdgeNode() error { - kubeletConfigDir := filepath.Dir(edgenode.KubeCondfigPath) - if _, err := os.Stat(kubeletConfigDir); err != nil { - if os.IsNotExist(err) { - if err := os.MkdirAll(kubeletConfigDir, os.ModePerm); err != nil { - klog.Errorf("Create dir %s fail: %v", kubeletConfigDir, err) - return err - } - } else { - klog.Errorf("Describe dir %s fail: %v", kubeletConfigDir, err) - return err - } - } - if err := ioutil.WriteFile(edgenode.KubeCondfigPath, []byte(kubeletConfForEdgeNode), 0755); err != nil { - return err - } - return nil -} - -//addYurthubStaticYaml generate YurtHub static yaml for edge-node. -func addYurthubStaticYaml(cfg *kubeadmapi.JoinConfiguration, podManifestPath string, yurthubImage string) error { - klog.Info("[join-node] Adding edge hub static yaml") - if len(yurthubImage) == 0 { - yurthubImage = fmt.Sprintf("%s/%s:%s", constants.DefaultOpenYurtImageRegistry, constants.Yurthub, constants.DefaultOpenYurtVersion) - } - if _, err := os.Stat(podManifestPath); err != nil { - if os.IsNotExist(err) { - err = os.MkdirAll(podManifestPath, os.ModePerm) - if err != nil { - return err - } - } else { - klog.Errorf("Describe dir %s fail: %v", podManifestPath, err) - return err - } - } - - yurthubTemplate := edgenode.ReplaceRegularExpression(edgenode.YurthubTemplate, - map[string]string{ - "__kubernetes_service_addr__": fmt.Sprintf("https://%s", cfg.Discovery.BootstrapToken.APIServerEndpoint), - "__yurthub_image__": yurthubImage, - "__join_token__": cfg.Discovery.BootstrapToken.Token, - "__working_mode__": string(util.WorkingModeEdge), - }) - - if err := ioutil.WriteFile(filepath.Join(podManifestPath, defaultYurthubStaticPodFileName), []byte(yurthubTemplate), 0600); err != nil { - return err - } - klog.Info("[join-node] Add edge hub static yaml is ok") - return nil -} - -//getKubeletConfig get kubelet configure from master. -func getKubeletConfig(cfg *kubeadmapi.JoinConfiguration, initCfg *kubeadmapi.InitConfiguration, tlsClient *clientset.Clientset) (*kubeletconfig.KubeletConfiguration, error) { - kubeletVersion, err := version.ParseSemantic(initCfg.ClusterConfiguration.KubernetesVersion) - if err != nil { - return nil, err - } - - // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start - kc, err := downloadConfig(tlsClient, kubeletVersion, kubeadmconstants.KubeletRunDirectory) - if err != nil { - return nil, err - } - if err := kubeletphase.WriteKubeletDynamicEnvFile(&initCfg.ClusterConfiguration, &cfg.NodeRegistration, false, kubeadmconstants.KubeletRunDirectory); err != nil { - return kc, err - } - return kc, nil -} - -// downloadConfig downloads the kubelet configuration from a ConfigMap and writes it to disk. 
-// Used at "kubeadm join" time -func downloadConfig(client clientset.Interface, kubeletVersion *version.Version, kubeletDir string) (*kubeletconfig.KubeletConfiguration, error) { - - // Download the ConfigMap from the cluster based on what version the kubelet is - configMapName := kubeadmconstants.GetKubeletConfigMapName(kubeletVersion) - - fmt.Printf("[kubelet-start] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n", - configMapName, metav1.NamespaceSystem) - - kubeletCfg, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName) - // If the ConfigMap wasn't found and the kubelet version is v1.10.x, where we didn't support the config file yet - // just return, don't error out - if apierrors.IsNotFound(err) && kubeletVersion.Minor() == 10 { - return nil, nil - } - if err != nil { - return nil, err - } - _, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs() - if err != nil { - return nil, err - } - kc, err := utilcodec.DecodeKubeletConfiguration(kubeletCodecs, []byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey])) - if err != nil { - return nil, err - } - if kc.StaticPodPath == "" { - kc.StaticPodPath = constants.StaticPodPath - } - encoder, err := utilcodec.NewKubeletconfigYAMLEncoder(kubeletconfigv1beta1.SchemeGroupVersion) - if err != nil { - return nil, err - } - data, err := runtime.Encode(encoder, kc) - if err != nil { - return nil, err - } - return kc, writeConfigBytesToDisk(data, kubeletDir) -} - -//getEdgeNodeJoinData get edge-node join configuration. -func getEdgeNodeJoinData(data YurtJoinData) (*kubeadmapi.JoinConfiguration, *kubeadmapi.InitConfiguration, *clientcmdapi.Config, error) { - cfg := data.Cfg() - initCfg, err := data.InitCfg() - if err != nil { - return nil, nil, nil, err - } - tlsBootstrapCfg, err := data.TLSBootstrapCfg() - if err != nil { - return nil, nil, nil, err - } - return cfg, initCfg, tlsBootstrapCfg, nil -} - -// writeConfigBytesToDisk writes a byte slice down to disk at the specific location of the kubelet config file -func writeConfigBytesToDisk(b []byte, kubeletDir string) error { - configFile := filepath.Join(kubeletDir, kubeadmconstants.KubeletConfigurationFileName) - fmt.Printf("[kubelet-start] Writing kubelet configuration to file %q\n", configFile) - - // creates target folder if not already exists - if err := os.MkdirAll(kubeletDir, 0700); err != nil { - return errors.Wrapf(err, "failed to create directory %q", kubeletDir) - } - - if err := ioutil.WriteFile(configFile, b, 0644); err != nil { - return errors.Wrapf(err, "failed to write kubelet configuration to the file %q", configFile) - } - return nil -} diff --git a/pkg/yurtctl/cmd/join/phases/joinnode.go b/pkg/yurtctl/cmd/join/phases/joinnode.go new file mode 100644 index 00000000000..d862b5a9c69 --- /dev/null +++ b/pkg/yurtctl/cmd/join/phases/joinnode.go @@ -0,0 +1,190 @@ +/* +Copyright 2021 The OpenYurt Authors. +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package phases + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" + + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + yurtconstants "github.com/openyurtio/openyurt/pkg/yurtctl/constants" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + kubeutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient" + kubeletconfig "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config" + kubeletscheme "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config/scheme" + kubeletcodec "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec" + "github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode" + "github.com/openyurtio/openyurt/pkg/yurtctl/util/templates" +) + +// NewEdgeNodePhase creates a yurtctl workflow phase that starts the kubelet on a worker node. +func NewEdgeNodePhase() workflow.Phase { + return workflow.Phase{ + Name: "Join node to OpenYurt cluster. ", + Short: "Join node", + Run: runJoinNode, + InheritFlags: []string{ + options.TokenStr, + options.NodeCRISocket, + options.NodeName, + options.IgnorePreflightErrors, + }, + } +} + +// runJoinNode executes the node join process. +func runJoinNode(c workflow.RunData) error { + data, ok := c.(joindata.YurtJoinData) + if !ok { + return fmt.Errorf("Join node phase invoked with an invalid data struct. ") + } + + err := writeKubeletConfigFile(data.BootstrapClient(), data) + if err != nil { + return err + } + if err := addYurthubStaticYaml(data, filepath.Join(constants.KubernetesDir, constants.ManifestsSubDirName)); err != nil { + return err + } + klog.Info("[kubelet-start] Starting the kubelet") + kubeutil.TryStartKubelet() + return nil +} + +// writeKubeletConfigFile writes the kubelet configuration to local disk. +func writeKubeletConfigFile(bootstrapClient *clientset.Clientset, data joindata.YurtJoinData) error { + kubeletVersion, err := version.ParseSemantic(data.KubernetesVersion()) + if err != nil { + return err + } + + // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start + _, err = downloadConfig(bootstrapClient, kubeletVersion, constants.KubeletRunDirectory) + if err != nil { + return err + } + + if err := kubeutil.WriteKubeletDynamicEnvFile(data, constants.KubeletRunDirectory); err != nil { + return err + } + return nil +} + +// downloadConfig downloads the kubelet configuration from a ConfigMap and writes it to disk.
+// Used at "kubeadm join" time +func downloadConfig(client clientset.Interface, kubeletVersion *version.Version, kubeletDir string) (*kubeletconfig.KubeletConfiguration, error) { + // Download the ConfigMap from the cluster based on what version the kubelet is + configMapName := constants.GetKubeletConfigMapName(kubeletVersion) + + klog.Infof("[kubelet-start] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace", + configMapName, metav1.NamespaceSystem) + + kubeletCfg, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName) + // If the ConfigMap wasn't found and the kubelet version is v1.10.x, where we didn't support the config file yet + // just return, don't error out + if apierrors.IsNotFound(err) && kubeletVersion.Minor() == 10 { + return nil, nil + } + if err != nil { + return nil, err + } + + // populate static pod path of kubelet configuration in OpenYurt + _, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs() + if err != nil { + return nil, err + } + kc, err := kubeletcodec.DecodeKubeletConfiguration(kubeletCodecs, []byte(kubeletCfg.Data[constants.KubeletBaseConfigurationConfigMapKey])) + if err != nil { + return nil, err + } + if kc.StaticPodPath == "" { + kc.StaticPodPath = filepath.Join(constants.KubernetesDir, constants.ManifestsSubDirName) + } + + data, err := kubeletcodec.EncodeKubeletConfig(kc, kubeletconfigv1beta1.SchemeGroupVersion) + if err != nil { + return nil, err + } + + return kc, writeConfigBytesToDisk(data, kubeletDir) +} + +// writeConfigBytesToDisk writes a byte slice down to disk at the specific location of the kubelet config file +func writeConfigBytesToDisk(b []byte, kubeletDir string) error { + configFile := filepath.Join(kubeletDir, constants.KubeletConfigurationFileName) + klog.Infof("[kubelet-start] Writing kubelet configuration to file %q", configFile) + + // creates target folder if not already exists + if err := os.MkdirAll(kubeletDir, 0700); err != nil { + return errors.Wrapf(err, "failed to create directory %q", kubeletDir) + } + + if err := ioutil.WriteFile(configFile, b, 0644); err != nil { + return errors.Wrapf(err, "failed to write kubelet configuration to the file %q", configFile) + } + return nil +} + +// addYurthubStaticYaml generate YurtHub static yaml for worker node. 
+func addYurthubStaticYaml(data joindata.YurtJoinData, podManifestPath string) error { + klog.Info("[join-node] Adding edge hub static yaml") + if _, err := os.Stat(podManifestPath); err != nil { + if os.IsNotExist(err) { + err = os.MkdirAll(podManifestPath, os.ModePerm) + if err != nil { + return err + } + } else { + klog.Errorf("Describe dir %s fail: %v", podManifestPath, err) + return err + } + } + + ctx := map[string]string{ + "kubernetesServerAddr": fmt.Sprintf("https://%s", data.ServerAddr()), + "image": data.YurtHubImage(), + "joinToken": data.JoinToken(), + "workingMode": data.NodeRegistration().WorkingMode, + "organizations": data.NodeRegistration().Organizations, + } + + yurthubTemplate, err := templates.SubsituteTemplate(edgenode.YurthubTemplate, ctx) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(podManifestPath, yurtconstants.YurthubStaticPodFileName), []byte(yurthubTemplate), 0600); err != nil { + return err + } + klog.Info("[join-node] Add hub agent static yaml is ok") + return nil +} diff --git a/pkg/yurtctl/cmd/join/phases/postcheck.go b/pkg/yurtctl/cmd/join/phases/postcheck.go index 600c6528931..563c934dcd9 100644 --- a/pkg/yurtctl/cmd/join/phases/postcheck.go +++ b/pkg/yurtctl/cmd/join/phases/postcheck.go @@ -20,21 +20,20 @@ import ( "fmt" "io/ioutil" "net/http" + "path/filepath" "time" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - patchnodephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode" - "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - "k8s.io/kubernetes/cmd/kubeadm/app/util/initsystem" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - - "github.com/openyurtio/openyurt/pkg/projectinfo" - "github.com/openyurtio/openyurt/pkg/yurtctl/constants" + + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig" "github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode" ) @@ -43,13 +42,16 @@ func NewPostcheckPhase() workflow.Phase { return workflow.Phase{ Name: "postcheck", Short: "postcheck", - Run: runPostcheck, + Run: runPostCheck, + InheritFlags: []string{ + options.TokenStr, + }, } } -//runPostcheck executes the node health check process. -func runPostcheck(c workflow.RunData) error { - j, ok := c.(YurtJoinData) +// runPostCheck executes the node health check process. +func runPostCheck(c workflow.RunData) error { + j, ok := c.(joindata.YurtJoinData) if !ok { return fmt.Errorf("Postcheck edge-node phase invoked with an invalid data struct. 
") } @@ -58,16 +60,16 @@ func runPostcheck(c workflow.RunData) error { if err := checkKubeletStatus(); err != nil { return err } + klog.V(1).Infof("kubelet service is active") - cfg := j.Cfg() - if j.NodeType() == constants.EdgeNode { - klog.V(1).Infof("waiting yurt hub ready.") - if err := checkYurthubHealthz(); err != nil { - return err - } - return patchEdgeNode(cfg) + klog.V(1).Infof("waiting hub agent ready.") + if err := checkYurthubHealthz(); err != nil { + return err } - return patchCloudNode(cfg) + klog.V(1).Infof("hub agent is ready") + + nodeRegistration := j.NodeRegistration() + return patchNode(nodeRegistration.Name, nodeRegistration.CRISocket) } //checkKubeletStatus check if kubelet is healthy. @@ -102,36 +104,17 @@ func checkYurthubHealthz() error { }) } -//patchEdgeNode patch labels and annotations for edge-node. -func patchEdgeNode(cfg *kubeadm.JoinConfiguration) error { - client, err := kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetKubeletKubeConfigPath()) +//patchNode patch annotations for worker node. +func patchNode(nodeName, criSocket string) error { + client, err := kubeconfig.ClientSetFromFile(filepath.Join(constants.KubernetesDir, constants.KubeletKubeConfigFileName)) if err != nil { return err } - if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil { - return err - } - if err := apiclient.PatchNode(client, cfg.NodeRegistration.Name, func(n *v1.Node) { - n.Labels[projectinfo.GetEdgeWorkerLabelKey()] = "true" - }); err != nil { - return err - } - return nil -} -//patchCloudNode patch labels and annotations for cloud-node. -func patchCloudNode(cfg *kubeadm.JoinConfiguration) error { - client, err := kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetKubeletKubeConfigPath()) - if err != nil { - return err - } - if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil { - return err - } - if err := apiclient.PatchNode(client, cfg.NodeRegistration.Name, func(n *v1.Node) { - n.Labels[projectinfo.GetEdgeWorkerLabelKey()] = "false" - }); err != nil { - return err - } - return nil + return apiclient.PatchNode(client, nodeName, func(n *v1.Node) { + if n.ObjectMeta.Annotations == nil { + n.ObjectMeta.Annotations = make(map[string]string) + } + n.ObjectMeta.Annotations[constants.AnnotationKubeadmCRISocket] = criSocket + }) } diff --git a/pkg/yurtctl/cmd/join/phases/preflight.go b/pkg/yurtctl/cmd/join/phases/preflight.go new file mode 100644 index 00000000000..0dc31dfdf02 --- /dev/null +++ b/pkg/yurtctl/cmd/join/phases/preflight.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package phases + +import ( + "github.com/pkg/errors" + "k8s.io/klog/v2" + utilsexec "k8s.io/utils/exec" + + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/preflight" +) + +// NewPreflightPhase creates a kubeadm workflow phase that implements preflight checks for a new node join +func NewPreflightPhase() workflow.Phase { + return workflow.Phase{ + Name: "preflight [api-server-endpoint]", + Short: "Run join pre-flight checks", + Long: "Run pre-flight checks for kubeadm join.", + Run: runPreflight, + InheritFlags: []string{ + options.TokenStr, + options.NodeCRISocket, + options.NodeName, + options.IgnorePreflightErrors, + }, + } +} + +// runPreflight executes preflight checks logic. +func runPreflight(c workflow.RunData) error { + data, ok := c.(joindata.YurtJoinData) + if !ok { + return errors.New("preflight phase invoked with an invalid data struct") + } + + // Start with general checks + klog.V(1).Infoln("[preflight] Running general checks") + if err := preflight.RunJoinNodeChecks(utilsexec.New(), data); err != nil { + return err + } + + return nil +} diff --git a/pkg/yurtctl/cmd/join/phases/prepare.go b/pkg/yurtctl/cmd/join/phases/prepare.go index bd4bcb148db..aee174f0e86 100644 --- a/pkg/yurtctl/cmd/join/phases/prepare.go +++ b/pkg/yurtctl/cmd/join/phases/prepare.go @@ -18,32 +18,42 @@ package phases import ( "fmt" + "os" + "path/filepath" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" + "k8s.io/klog/v2" + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" "github.com/openyurtio/openyurt/pkg/yurtctl/util/kubernetes" "github.com/openyurtio/openyurt/pkg/yurtctl/util/system" ) -// NewEdgeNodePhase creates a yurtctl workflow phase that initialize the node environment. +// NewPreparePhase creates a yurtctl workflow phase that initializes the node environment. func NewPreparePhase() workflow.Phase { return workflow.Phase{ Name: "Initialize system environment.", Short: "Initialize system environment.", Run: runPrepare, + InheritFlags: []string{ + options.TokenStr, + }, } } //runPrepare executes the node initialization process. func runPrepare(c workflow.RunData) error { - data, ok := c.(YurtJoinData) + data, ok := c.(joindata.YurtJoinData) if !ok { return fmt.Errorf("Prepare phase invoked with an invalid data struct.
") } - initCfg, err := data.InitCfg() - if err != nil { - return err + // cleanup at first + staticPodsPath := filepath.Join(constants.KubernetesDir, constants.ManifestsSubDirName) + if err := os.RemoveAll(staticPodsPath); err != nil { + klog.Warningf("remove %s: %v", staticPodsPath, err) } if err := system.SetIpv4Forward(); err != nil { @@ -55,13 +65,19 @@ func runPrepare(c workflow.RunData) error { if err := system.SetSELinux(); err != nil { return err } - if err := kubernetes.CheckAndInstallKubelet(initCfg.ClusterConfiguration.KubernetesVersion); err != nil { + if err := kubernetes.CheckAndInstallKubelet(data.KubernetesVersion()); err != nil { return err } if err := kubernetes.SetKubeletService(); err != nil { return err } - if err := kubernetes.SetKubeletUnitConfig(data.NodeType()); err != nil { + if err := kubernetes.SetKubeletUnitConfig(); err != nil { + return err + } + if err := kubernetes.SetKubeletConfigForNode(); err != nil { + return err + } + if err := kubernetes.SetKubeletCaCert(data.TLSBootstrapCfg()); err != nil { return err } return nil diff --git a/pkg/yurtctl/cmd/reset/phases/cleanupnode.go b/pkg/yurtctl/cmd/reset/phases/cleanupnode.go new file mode 100644 index 00000000000..f17c70b9d33 --- /dev/null +++ b/pkg/yurtctl/cmd/reset/phases/cleanupnode.go @@ -0,0 +1,154 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package phases + +import ( + "errors" + "os" + "path/filepath" + + "k8s.io/klog/v2" + utilsexec "k8s.io/utils/exec" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + kubeadmconstants "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + kubeutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet" + utilruntime "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime" +) + +// NewCleanupNodePhase creates a kubeadm workflow phase that cleanup the node +func NewCleanupNodePhase() workflow.Phase { + return workflow.Phase{ + Name: "cleanup-node", + Aliases: []string{"cleanupnode"}, + Short: "Run cleanup node.", + Run: runCleanupNode, + InheritFlags: []string{ + options.NodeCRISocket, + }, + } +} + +func runCleanupNode(c workflow.RunData) error { + r, ok := c.(resetData) + if !ok { + return errors.New("cleanup-node phase invoked with an invalid data struct") + } + + // Try to stop the kubelet service + klog.Infoln("[reset] Stopping the kubelet service") + kubeutil.TryStopKubelet() + + // Try to unmount mounted directories under kubeadmconstants.KubeletRunDirectory in order to be able to remove the kubeadmconstants.KubeletRunDirectory directory later + klog.Infof("[reset] Unmounting mounted directories in %q", kubeadmconstants.KubeletRunDirectory) + // In case KubeletRunDirectory holds a symbolic link, evaluate it + kubeletRunDir, err := absoluteKubeletRunDirectory() + if err == nil { + // Only clean absoluteKubeletRunDirectory if umountDirsCmd passed without error + r.AddDirsToClean(kubeletRunDir) + } + + klog.V(1).Info("[reset] Removing Kubernetes-managed containers") + if err := removeContainers(utilsexec.New(), r.CRISocketPath()); err != nil { + klog.Warningf("[reset] Failed to remove containers: %v", err) + } + + r.AddDirsToClean("/var/lib/dockershim", "/var/run/kubernetes", "/var/lib/cni") + + // Remove contents from the config and pki directories + klog.V(1).Infoln("[reset] Removing contents from the config and pki directories") + certsDir := filepath.Join(kubeadmconstants.KubernetesDir, "pki") + resetConfigDir(kubeadmconstants.KubernetesDir, certsDir) + + return nil +} + +func absoluteKubeletRunDirectory() (string, error) { + absoluteKubeletRunDirectory, err := filepath.EvalSymlinks(kubeadmconstants.KubeletRunDirectory) + if err != nil { + klog.Warningf("[reset] Failed to evaluate the %q directory. Skipping its unmount and cleanup: %v", kubeadmconstants.KubeletRunDirectory, err) + return "", err + } + err = unmountKubeletDirectory(absoluteKubeletRunDirectory) + if err != nil { + klog.Warningf("[reset] Failed to unmount mounted directories in %s", kubeadmconstants.KubeletRunDirectory) + return "", err + } + return absoluteKubeletRunDirectory, nil +} + +func removeContainers(execer utilsexec.Interface, criSocketPath string) error { + containerRuntime, err := utilruntime.NewContainerRuntime(execer, criSocketPath) + if err != nil { + return err + } + containers, err := containerRuntime.ListKubeContainers() + if err != nil { + return err + } + return containerRuntime.RemoveContainers(containers) +} + +// resetConfigDir is used to cleanup the files kubeadm writes in /etc/kubernetes/. 
+func resetConfigDir(configPathDir, pkiPathDir string) { + dirsToClean := []string{ + filepath.Join(configPathDir, kubeadmconstants.ManifestsSubDirName), + pkiPathDir, + } + klog.Infof("[reset] Deleting contents of config directories: %v", dirsToClean) + for _, dir := range dirsToClean { + if err := CleanDir(dir); err != nil { + klog.Warningf("[reset] Failed to delete contents of %q directory: %v", dir, err) + } + } + + filesToClean := []string{ + filepath.Join(configPathDir, kubeadmconstants.KubeletKubeConfigFileName), + } + klog.Infof("[reset] Deleting files: %v", filesToClean) + for _, path := range filesToClean { + if err := os.RemoveAll(path); err != nil { + klog.Warningf("[reset] Failed to remove file: %q [%v]", path, err) + } + } +} + +// CleanDir removes everything in a directory, but not the directory itself +func CleanDir(filePath string) error { + // If the directory doesn't even exist there's nothing to do, and we do + // not consider this an error + if _, err := os.Stat(filePath); os.IsNotExist(err) { + return nil + } + + d, err := os.Open(filePath) + if err != nil { + return err + } + defer d.Close() + names, err := d.Readdirnames(-1) + if err != nil { + return err + } + for _, name := range names { + if err = os.RemoveAll(filepath.Join(filePath, name)); err != nil { + return err + } + } + return nil +} diff --git a/pkg/yurtctl/cmd/reset/phases/cleanfile.go b/pkg/yurtctl/cmd/reset/phases/cleanyurtfile.go similarity index 85% rename from pkg/yurtctl/cmd/reset/phases/cleanfile.go rename to pkg/yurtctl/cmd/reset/phases/cleanyurtfile.go index e4e0875c505..46723107f5c 100644 --- a/pkg/yurtctl/cmd/reset/phases/cleanfile.go +++ b/pkg/yurtctl/cmd/reset/phases/cleanyurtfile.go @@ -21,16 +21,16 @@ import ( "os" "k8s.io/klog/v2" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" "github.com/openyurtio/openyurt/pkg/yurtctl/constants" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" "github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode" ) -func NewCleanfilePhase() workflow.Phase { +func NewCleanYurtFilePhase() workflow.Phase { return workflow.Phase{ - Name: "Clean up the directories and files related to kubelet and yurthub.", - Short: "Clean up the directories and files related to kubelet and yurthub.", + Name: "Clean up the directories and files related to openyurt.", + Short: "Clean up the directories and files related to openyurt.", Run: runCleanfile, } } diff --git a/pkg/yurtctl/cmd/join/phases/constants.go b/pkg/yurtctl/cmd/reset/phases/data.go similarity index 57% rename from pkg/yurtctl/cmd/join/phases/constants.go rename to pkg/yurtctl/cmd/reset/phases/data.go index 158e25c1568..47a6e19df6f 100644 --- a/pkg/yurtctl/cmd/join/phases/constants.go +++ b/pkg/yurtctl/cmd/reset/phases/data.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The OpenYurt Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,25 +16,18 @@ limitations under the License. 
package phases -const ( - defaultYurthubStaticPodFileName = "yurthub.yaml" -) +import ( + "io" -const ( - kubeletConfForEdgeNode = ` -apiVersion: v1 -clusters: -- cluster: - server: http://127.0.0.1:10261 - name: default-cluster -contexts: -- context: - cluster: default-cluster - namespace: default - user: default-auth - name: default-context -current-context: default-context -kind: Config -preferences: {} -` + "k8s.io/apimachinery/pkg/util/sets" ) + +// resetData is the interface to use for reset phases. +// The "resetData" type from "cmd/reset.go" must satisfy this interface. +type resetData interface { + ForceReset() bool + InputReader() io.Reader + IgnorePreflightErrors() sets.String + AddDirsToClean(dirs ...string) + CRISocketPath() string +} diff --git a/pkg/yurtctl/cmd/reset/phases/preflight.go b/pkg/yurtctl/cmd/reset/phases/preflight.go new file mode 100644 index 00000000000..ddb7453cebb --- /dev/null +++ b/pkg/yurtctl/cmd/reset/phases/preflight.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package phases + +import ( + "bufio" + "errors" + "strings" + + "k8s.io/klog/v2" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/preflight" +) + +// NewPreflightPhase creates a kubeadm workflow phase implements preflight checks for reset +func NewPreflightPhase() workflow.Phase { + return workflow.Phase{ + Name: "preflight", + Aliases: []string{"pre-flight"}, + Short: "Run reset pre-flight checks", + Long: "Run pre-flight checks for kubeadm reset.", + Run: runPreflight, + InheritFlags: []string{ + options.IgnorePreflightErrors, + options.ForceReset, + }, + } +} + +// runPreflight executes preflight checks logic. +func runPreflight(c workflow.RunData) error { + r, ok := c.(resetData) + if !ok { + return errors.New("preflight phase invoked with an invalid data struct") + } + + if !r.ForceReset() { + klog.Infoln("[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.") + klog.Info("[reset] Are you sure you want to proceed? [y/N]: ") + s := bufio.NewScanner(r.InputReader()) + s.Scan() + if err := s.Err(); err != nil { + return err + } + if strings.ToLower(s.Text()) != "y" { + return errors.New("aborted reset operation") + } + } + + klog.Infoln("[preflight] Running pre-flight checks") + return preflight.RunRootCheckOnly(r.IgnorePreflightErrors()) +} diff --git a/pkg/yurtctl/cmd/reset/phases/unmount.go b/pkg/yurtctl/cmd/reset/phases/unmount.go new file mode 100644 index 00000000000..ab0f81bb118 --- /dev/null +++ b/pkg/yurtctl/cmd/reset/phases/unmount.go @@ -0,0 +1,29 @@ +// +build !linux + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package phases + +import ( + "k8s.io/klog/v2" +) + +// unmountKubeletDirectory is a NOOP on all but linux. +func unmountKubeletDirectory(absoluteKubeletRunDirectory string) error { + klog.Warning("Cannot unmount filesystems on current OS, all mounted file systems will need to be manually unmounted") + return nil +} diff --git a/pkg/yurtctl/cmd/reset/phases/unmount_linux.go b/pkg/yurtctl/cmd/reset/phases/unmount_linux.go new file mode 100644 index 00000000000..84ba5db2e55 --- /dev/null +++ b/pkg/yurtctl/cmd/reset/phases/unmount_linux.go @@ -0,0 +1,46 @@ +// +build linux + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package phases + +import ( + "io/ioutil" + "strings" + "syscall" + + "k8s.io/klog/v2" +) + +// unmountKubeletDirectory unmounts all paths that contain KubeletRunDirectory +func unmountKubeletDirectory(absoluteKubeletRunDirectory string) error { + raw, err := ioutil.ReadFile("/proc/mounts") + if err != nil { + return err + } + mounts := strings.Split(string(raw), "\n") + for _, mount := range mounts { + m := strings.Split(mount, " ") + if len(m) < 2 || !strings.HasPrefix(m[1], absoluteKubeletRunDirectory) { + continue + } + if err := syscall.Unmount(m[1], 0); err != nil { + klog.Warningf("[reset] Failed to unmount mounted directory in %s: %s", absoluteKubeletRunDirectory, m[1]) + } + } + return nil +} diff --git a/pkg/yurtctl/cmd/reset/reset.go b/pkg/yurtctl/cmd/reset/reset.go index 616748f1a16..2344dfac172 100644 --- a/pkg/yurtctl/cmd/reset/reset.go +++ b/pkg/yurtctl/cmd/reset/reset.go @@ -18,30 +18,19 @@ limitations under the License. 
package reset import ( - "fmt" "io" - "os" + "strings" "github.com/lithammer/dedent" "github.com/spf13/cobra" flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/sets" - clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" - kubeadmphases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/reset" - "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime" yurtphases "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/reset/phases" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow" + utilruntime "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime" ) var ( @@ -63,63 +52,35 @@ var ( // resetOptions defines all the options exposed via flags by kubeadm reset. type resetOptions struct { - certificatesDir string criSocketPath string forceReset bool ignorePreflightErrors []string - kubeconfigPath string } // resetData defines all the runtime information used when running the kubeadm reset workflow; // this data is shared across all the phases that are included in the workflow. type resetData struct { - certificatesDir string - client clientset.Interface criSocketPath string forceReset bool ignorePreflightErrors sets.String inputReader io.Reader outputWriter io.Writer - cfg *kubeadmapi.InitConfiguration dirsToClean []string } // newResetOptions returns a struct ready for being used for creating cmd join flags. func newResetOptions() *resetOptions { return &resetOptions{ - certificatesDir: kubeadmapiv1beta2.DefaultCertificatesDir, - forceReset: false, - kubeconfigPath: kubeadmconstants.GetAdminKubeConfigPath(), + forceReset: false, } } // newResetData returns a new resetData struct to be used for the execution of the kubeadm reset workflow. 
func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out io.Writer) (*resetData, error) { - var cfg *kubeadmapi.InitConfiguration - - client, err := getClientset(options.kubeconfigPath, false) - if err == nil { - klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", options.kubeconfigPath) - cfg, err = configutil.FetchInitConfigurationFromCluster(client, out, "reset", false) - if err != nil { - klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err) - } - } else { - klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", options.kubeconfigPath) - } - - ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors, ignorePreflightErrors(cfg)) - if err != nil { - return nil, err - } - if cfg != nil { - // Also set the union of pre-flight errors to InitConfiguration, to provide a consistent view of the runtime configuration: - cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List() - } - var criSocketPath string + var err error if options.criSocketPath == "" { - criSocketPath, err = resetDetectCRISocket(cfg) + criSocketPath, err = utilruntime.DetectCRISocket() if err != nil { return nil, err } @@ -129,42 +90,37 @@ func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out i klog.V(1).Infof("[reset] Using specified CRI socket: %s", criSocketPath) } + ignoreErrors := sets.NewString() + for _, item := range options.ignorePreflightErrors { + ignoreErrors.Insert(strings.ToLower(item)) + } + return &resetData{ - certificatesDir: options.certificatesDir, - client: client, criSocketPath: criSocketPath, forceReset: options.forceReset, - ignorePreflightErrors: ignorePreflightErrorsSet, + ignorePreflightErrors: ignoreErrors, inputReader: in, outputWriter: out, - cfg: cfg, }, nil } -func ignorePreflightErrors(cfg *kubeadmapi.InitConfiguration) []string { - if cfg == nil { - return []string{} - } - return cfg.NodeRegistration.IgnorePreflightErrors -} - // AddResetFlags adds reset flags func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) { - flagSet.StringVar( - &resetOptions.certificatesDir, options.CertificatesDir, resetOptions.certificatesDir, - `The path to the directory where the certificates are stored. If specified, clean this directory.`, - ) flagSet.BoolVarP( &resetOptions.forceReset, options.ForceReset, "f", false, "Reset the node without prompting for confirmation.", ) - - options.AddKubeConfigFlag(flagSet, &resetOptions.kubeconfigPath) - options.AddIgnorePreflightErrorsFlag(flagSet, &resetOptions.ignorePreflightErrors) - cmdutil.AddCRISocketFlag(flagSet, &resetOptions.criSocketPath) + flagSet.StringVar( + &resetOptions.criSocketPath, options.NodeCRISocket, resetOptions.criSocketPath, + "Path to the CRI socket to connect", + ) + flagSet.StringSliceVar( + &resetOptions.ignorePreflightErrors, options.IgnorePreflightErrors, resetOptions.ignorePreflightErrors, + "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'.
Value 'all' ignores errors from all checks.", + ) } -// NewCmdReset returns the "kubeadm reset" command +// NewCmdReset returns the "yurtctl reset" command func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra.Command { if resetOptions == nil { resetOptions = newResetOptions() @@ -173,7 +129,7 @@ func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra cmd := &cobra.Command{ Use: "reset", - Short: "Performs a best effort revert of changes made to this host by 'kubeadm init' or 'kubeadm join'", + Short: "Performs a best effort revert of changes made to this host by 'yurtctl join'", RunE: func(cmd *cobra.Command, args []string) error { c, err := resetRunner.InitData(args) if err != nil { @@ -190,9 +146,9 @@ func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra cleanDirs(data) // output help text instructing user how to remove cni folders - fmt.Print(cniCleanupInstructions) + klog.Info(cniCleanupInstructions) // Output help text instructing user how to remove iptables rules - fmt.Print(iptablesCleanupInstructions) + klog.Info(iptablesCleanupInstructions) return nil }, } @@ -200,11 +156,9 @@ func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra AddResetFlags(cmd.Flags(), resetOptions) // initialize the workflow runner with the list of phases - resetRunner.AppendPhase(kubeadmphases.NewPreflightPhase()) - resetRunner.AppendPhase(kubeadmphases.NewUpdateClusterStatus()) - resetRunner.AppendPhase(kubeadmphases.NewRemoveETCDMemberPhase()) - resetRunner.AppendPhase(kubeadmphases.NewCleanupNodePhase()) - resetRunner.AppendPhase(yurtphases.NewCleanfilePhase()) + resetRunner.AppendPhase(yurtphases.NewPreflightPhase()) + resetRunner.AppendPhase(yurtphases.NewCleanupNodePhase()) + resetRunner.AppendPhase(yurtphases.NewCleanYurtFilePhase()) // sets the data builder function, that will be used by the runner // both when running the entire workflow or single phases @@ -220,30 +174,15 @@ func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra } func cleanDirs(data *resetData) { - fmt.Printf("[reset] Deleting contents of stateful directories: %v\n", data.dirsToClean) + klog.Infof("[reset] Deleting contents of stateful directories: %v\n", data.dirsToClean) for _, dir := range data.dirsToClean { klog.V(1).Infof("[reset] Deleting contents of %s", dir) - if err := kubeadmphases.CleanDir(dir); err != nil { + if err := yurtphases.CleanDir(dir); err != nil { klog.Warningf("[reset] Failed to delete contents of %q directory: %v", dir, err) } } } -// Cfg returns the InitConfiguration. -func (r *resetData) Cfg() *kubeadmapi.InitConfiguration { - return r.cfg -} - -// CertificatesDir returns the CertificatesDir. -func (r *resetData) CertificatesDir() string { - return r.certificatesDir -} - -// Client returns the Client for accessing the cluster. -func (r *resetData) Client() clientset.Interface { - return r.client -} - // ForceReset returns the forceReset flag. 
func (r *resetData) ForceReset() bool { return r.forceReset @@ -268,24 +207,3 @@ func (r *resetData) AddDirsToClean(dirs ...string) { func (r *resetData) CRISocketPath() string { return r.criSocketPath } - -func resetDetectCRISocket(cfg *kubeadmapi.InitConfiguration) (string, error) { - if cfg != nil { - // first try to get the CRI socket from the cluster configuration - return cfg.NodeRegistration.CRISocket, nil - } - - // if this fails, try to detect it - return utilruntime.DetectCRISocket() -} - -func getClientset(file string, dryRun bool) (clientset.Interface, error) { - if dryRun { - dryRunGetter, err := apiclient.NewClientBackedDryRunGetterFromKubeconfig(file) - if err != nil { - return nil, err - } - return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil - } - return kubeconfigutil.ClientSetFromFile(file) -} diff --git a/pkg/yurtctl/cmd/yurtinit/phases/prepare.go b/pkg/yurtctl/cmd/yurtinit/phases/prepare.go index dbe09dea2e7..6facfaeb2ed 100644 --- a/pkg/yurtctl/cmd/yurtinit/phases/prepare.go +++ b/pkg/yurtctl/cmd/yurtinit/phases/prepare.go @@ -21,7 +21,6 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" - "github.com/openyurtio/openyurt/pkg/yurtctl/constants" "github.com/openyurtio/openyurt/pkg/yurtctl/util/kubernetes" "github.com/openyurtio/openyurt/pkg/yurtctl/util/system" ) @@ -58,7 +57,7 @@ func runPrepare(c workflow.RunData) error { if err := kubernetes.SetKubeletService(); err != nil { return err } - if err := kubernetes.SetKubeletUnitConfig(constants.CloudNode); err != nil { + if err := kubernetes.SetKubeletUnitConfig(); err != nil { return err } return nil diff --git a/pkg/yurtctl/constants/constants.go b/pkg/yurtctl/constants/constants.go index 62c212b115d..ffb848b78b7 100644 --- a/pkg/yurtctl/constants/constants.go +++ b/pkg/yurtctl/constants/constants.go @@ -17,12 +17,15 @@ limitations under the License. 
package constants import ( + "k8s.io/apimachinery/pkg/util/version" + "github.com/openyurtio/openyurt/pkg/projectinfo" ) var ( // AnnotationAutonomy is used to identify if a node is autonomous - AnnotationAutonomy = projectinfo.GetAutonomyAnnotation() + AnnotationAutonomy = projectinfo.GetAutonomyAnnotation() + MinimumKubeletVersion = version.MustParseSemantic("v1.17.0") ) const ( @@ -35,24 +38,26 @@ const ( YurttunnelAgentComponentName = "yurt-tunnel-agent" YurttunnelNamespace = "kube-system" - Sysctl_k8s_config = "/etc/sysctl.d/k8s.conf" - StaticPodPath = "/etc/kubernetes/manifests" - KubeletConfigureDir = "/etc/kubernetes" - KubeletWorkdir = "/var/lib/kubelet" - YurtHubWorkdir = "/var/lib/yurthub" - YurttunnelAgentWorkdir = "/var/lib/yurttunnel-agent" - YurttunnelServerWorkdir = "/var/lib/yurttunnel-server" - KubeCniDir = "/opt/cni/bin" - KubeCniVersion = "v0.8.0" - KubeletServiceFilepath string = "/etc/systemd/system/kubelet.service" + Sysctl_k8s_config = "/etc/sysctl.d/k8s.conf" + KubeletConfigureDir = "/etc/kubernetes" + KubeletWorkdir = "/var/lib/kubelet" + YurtHubWorkdir = "/var/lib/yurthub" + YurttunnelAgentWorkdir = "/var/lib/yurttunnel-agent" + YurttunnelServerWorkdir = "/var/lib/yurttunnel-server" + KubeCniDir = "/opt/cni/bin" + KubeCniVersion = "v0.8.0" + KubeletServiceFilepath = "/etc/systemd/system/kubelet.service" + KubeletServiceConfPath = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + YurthubStaticPodFileName = "yurthub.yaml" + PauseImagePath = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2" CniUrlFormat = "https://aliacs-edge-k8s-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/public/pkg/openyurt/cni/%s/cni-plugins-linux-%s-%s.tgz" KubeUrlFormat = "https://dl.k8s.io/%s/kubernetes-node-linux-%s.tar.gz" TmpDownloadDir = "/tmp" FlannelIntallFile = "https://aliacs-edge-k8s-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/public/pkg/openyurt/flannel.yaml" - EdgeNode = "edge-node" - CloudNode = "cloud-node" + EdgeNode = "edge" + CloudNode = "cloud" DefaultOpenYurtImageRegistry = "registry.cn-hangzhou.aliyuncs.com/openyurt" DefaultOpenYurtVersion = "latest" @@ -76,7 +81,7 @@ RestartSec=10 [Install] WantedBy=multi-user.target` - EdgeKubeletUnitConfig = ` + KubeletUnitConfig = ` [Service] Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.conf" Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" @@ -85,14 +90,22 @@ EnvironmentFile=-/etc/default/kubelet ExecStart= ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS ` - CloudKubeletUnitConfig = ` -[Service] -Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" -Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" -EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env -EnvironmentFile=-/etc/default/kubelet -ExecStart= -ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS + + KubeletConfForNode = ` +apiVersion: v1 +clusters: +- cluster: + server: http://127.0.0.1:10261 + name: default-cluster +contexts: +- context: + cluster: default-cluster + namespace: default + user: default-auth + name: default-context +current-context: default-context +kind: Config +preferences: {} ` YurtControllerManagerServiceAccount = ` diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go 
b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go new file mode 100644 index 00000000000..119549c7bb6 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go @@ -0,0 +1,158 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + "sort" + "strings" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + bootstraputil "k8s.io/cluster-bootstrap/token/util" + bootstrapsecretutil "k8s.io/cluster-bootstrap/util/secrets" +) + +// ToSecret converts the given BootstrapToken object to its Secret representation that +// may be submitted to the API Server in order to be stored. +func (bt *BootstrapToken) ToSecret() *v1.Secret { + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: bootstraputil.BootstrapTokenSecretName(bt.Token.ID), + Namespace: metav1.NamespaceSystem, + }, + Type: v1.SecretType(bootstrapapi.SecretTypeBootstrapToken), + Data: encodeTokenSecretData(bt, time.Now()), + } +} + +// encodeTokenSecretData takes the token discovery object and an optional duration and returns the .Data for the Secret +// now is passed in order to be able to used in unit testing +func encodeTokenSecretData(token *BootstrapToken, now time.Time) map[string][]byte { + data := map[string][]byte{ + bootstrapapi.BootstrapTokenIDKey: []byte(token.Token.ID), + bootstrapapi.BootstrapTokenSecretKey: []byte(token.Token.Secret), + } + + if len(token.Description) > 0 { + data[bootstrapapi.BootstrapTokenDescriptionKey] = []byte(token.Description) + } + + // If for some strange reason both token.TTL and token.Expires would be set + // (they are mutually exclusive in validation so this shouldn't be the case), + // token.Expires has higher priority, as can be seen in the logic here. + if token.Expires != nil { + // Format the expiration date accordingly + // TODO: This maybe should be a helper function in bootstraputil? 
+ expirationString := token.Expires.Time.Format(time.RFC3339) + data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expirationString) + + } else if token.TTL != nil && token.TTL.Duration > 0 { + // Only if .Expires is unset, TTL might have an effect + // Get the current time, add the specified duration, and format it accordingly + expirationString := now.Add(token.TTL.Duration).Format(time.RFC3339) + data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expirationString) + } + + for _, usage := range token.Usages { + data[bootstrapapi.BootstrapTokenUsagePrefix+usage] = []byte("true") + } + + if len(token.Groups) > 0 { + data[bootstrapapi.BootstrapTokenExtraGroupsKey] = []byte(strings.Join(token.Groups, ",")) + } + return data +} + +// BootstrapTokenFromSecret returns a BootstrapToken object from the given Secret +func BootstrapTokenFromSecret(secret *v1.Secret) (*BootstrapToken, error) { + // Get the Token ID field from the Secret data + tokenID := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenIDKey) + if len(tokenID) == 0 { + return nil, errors.Errorf("bootstrap Token Secret has no token-id data: %s", secret.Name) + } + + // Enforce the right naming convention + if secret.Name != bootstraputil.BootstrapTokenSecretName(tokenID) { + return nil, errors.Errorf("bootstrap token name is not of the form '%s(token-id)'. Actual: %q. Expected: %q", + bootstrapapi.BootstrapTokenSecretPrefix, secret.Name, bootstraputil.BootstrapTokenSecretName(tokenID)) + } + + tokenSecret := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenSecretKey) + if len(tokenSecret) == 0 { + return nil, errors.Errorf("bootstrap Token Secret has no token-secret data: %s", secret.Name) + } + + // Create the BootstrapTokenString object based on the ID and Secret + bts, err := NewBootstrapTokenStringFromIDAndSecret(tokenID, tokenSecret) + if err != nil { + return nil, errors.Wrap(err, "bootstrap Token Secret is invalid and couldn't be parsed") + } + + // Get the description (if any) from the Secret + description := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenDescriptionKey) + + // Expiration time is optional, if not specified this implies the token + // never expires. 
+ secretExpiration := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenExpirationKey) + var expires *metav1.Time + if len(secretExpiration) > 0 { + expTime, err := time.Parse(time.RFC3339, secretExpiration) + if err != nil { + return nil, errors.Wrapf(err, "can't parse expiration time of bootstrap token %q", secret.Name) + } + expires = &metav1.Time{Time: expTime} + } + + // Build an usages string slice from the Secret data + var usages []string + for k, v := range secret.Data { + // Skip all fields that don't include this prefix + if !strings.HasPrefix(k, bootstrapapi.BootstrapTokenUsagePrefix) { + continue + } + // Skip those that don't have this usage set to true + if string(v) != "true" { + continue + } + usages = append(usages, strings.TrimPrefix(k, bootstrapapi.BootstrapTokenUsagePrefix)) + } + // Only sort the slice if defined + if usages != nil { + sort.Strings(usages) + } + + // Get the extra groups information from the Secret + // It's done this way to make .Groups be nil in case there is no items, rather than an + // empty slice or an empty slice with a "" string only + var groups []string + groupsString := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenExtraGroupsKey) + g := strings.Split(groupsString, ",") + if len(g) > 0 && len(g[0]) > 0 { + groups = g + } + + return &BootstrapToken{ + Token: bts, + Description: description, + Expires: expires, + Usages: usages, + Groups: groups, + }, nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers_test.go new file mode 100644 index 00000000000..9d1d97ccdbf --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenhelpers_test.go @@ -0,0 +1,456 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
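For reference, a minimal sketch of how the helpers above (ToSecret and BootstrapTokenFromSecret) can be used together, assuming the vendored import path introduced by this patch; it is illustrative only and not code from this change:

package main

import (
	"fmt"

	"github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm"
)

func main() {
	// Parse the canonical "id.secret" form into a BootstrapTokenString.
	bts, err := kubeadm.NewBootstrapTokenString("abcdef.0123456789abcdef")
	if err != nil {
		panic(err)
	}

	// Wrap it in a BootstrapToken and convert it to the Secret stored in kube-system.
	token := &kubeadm.BootstrapToken{
		Token:  bts,
		Usages: []string{"authentication", "signing"},
		Groups: []string{"system:bootstrappers"},
	}
	secret := token.ToSecret()
	fmt.Println(secret.Name) // bootstrap-token-abcdef

	// Round-trip the Secret back into a BootstrapToken.
	if _, err := kubeadm.BootstrapTokenFromSecret(secret); err != nil {
		panic(err)
	}
}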
+*/ + +package kubeadm + +import ( + "encoding/json" + "reflect" + "testing" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This timestamp is used as the reference value when computing expiration dates based on TTLs in these unit tests +var refTime = time.Date(1970, time.January, 1, 1, 1, 1, 0, time.UTC) + +func TestToSecret(t *testing.T) { + + var tests = []struct { + bt *BootstrapToken + secret *v1.Secret + }{ + { + &BootstrapToken{ // all together + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Description: "foo", + Expires: &metav1.Time{ + Time: refTime, + }, + Usages: []string{"signing", "authentication"}, + Groups: []string{"system:bootstrappers", "system:bootstrappers:foo"}, + }, + &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bootstrap-token-abcdef", + Namespace: "kube-system", + }, + Type: v1.SecretType("bootstrap.kubernetes.io/token"), + Data: map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "description": []byte("foo"), + "expiration": []byte(refTime.Format(time.RFC3339)), + "usage-bootstrap-signing": []byte("true"), + "usage-bootstrap-authentication": []byte("true"), + "auth-extra-groups": []byte("system:bootstrappers,system:bootstrappers:foo"), + }, + }, + }, + } + for _, rt := range tests { + t.Run(rt.bt.Token.ID, func(t *testing.T) { + actual := rt.bt.ToSecret() + if !reflect.DeepEqual(actual, rt.secret) { + t.Errorf( + "failed BootstrapToken.ToSecret():\n\texpected: %v\n\t actual: %v", + rt.secret, + actual, + ) + } + }) + } +} + +func TestBootstrapTokenToSecretRoundtrip(t *testing.T) { + var tests = []struct { + bt *BootstrapToken + }{ + { + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Description: "foo", + Expires: &metav1.Time{ + Time: refTime, + }, + Usages: []string{"authentication", "signing"}, + Groups: []string{"system:bootstrappers", "system:bootstrappers:foo"}, + }, + }, + } + for _, rt := range tests { + t.Run(rt.bt.Token.ID, func(t *testing.T) { + actual, err := BootstrapTokenFromSecret(rt.bt.ToSecret()) + if err != nil { + t.Errorf("failed BootstrapToken to Secret roundtrip with error: %v", err) + } + if !reflect.DeepEqual(actual, rt.bt) { + t.Errorf( + "failed BootstrapToken to Secret roundtrip:\n\texpected: %v\n\t actual: %v", + rt.bt, + actual, + ) + } + }) + } +} + +func TestEncodeTokenSecretData(t *testing.T) { + var tests = []struct { + name string + bt *BootstrapToken + data map[string][]byte + }{ + { + "the minimum amount of information needed to be specified", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + }, + }, + { + "adds description", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Description: "foo", + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "description": []byte("foo"), + }, + }, + { + "adds ttl", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + TTL: &metav1.Duration{ + Duration: mustParseDuration("2h", t), + }, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "expiration": []byte(refTime.Add(mustParseDuration("2h", t)).Format(time.RFC3339)), + }, + }, + { + "adds expiration", + &BootstrapToken{ + Token: 
&BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Expires: &metav1.Time{ + Time: refTime, + }, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "expiration": []byte(refTime.Format(time.RFC3339)), + }, + }, + { + "adds ttl and expiration, should favor expiration", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + TTL: &metav1.Duration{ + Duration: mustParseDuration("2h", t), + }, + Expires: &metav1.Time{ + Time: refTime, + }, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "expiration": []byte(refTime.Format(time.RFC3339)), + }, + }, + { + "adds usages", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Usages: []string{"authentication", "signing"}, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "usage-bootstrap-signing": []byte("true"), + "usage-bootstrap-authentication": []byte("true"), + }, + }, + { + "adds groups", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Groups: []string{"system:bootstrappers", "system:bootstrappers:foo"}, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "auth-extra-groups": []byte("system:bootstrappers,system:bootstrappers:foo"), + }, + }, + { + "all together", + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Description: "foo", + TTL: &metav1.Duration{ + Duration: mustParseDuration("2h", t), + }, + Expires: &metav1.Time{ + Time: refTime, + }, + Usages: []string{"authentication", "signing"}, + Groups: []string{"system:bootstrappers", "system:bootstrappers:foo"}, + }, + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "description": []byte("foo"), + "expiration": []byte(refTime.Format(time.RFC3339)), + "usage-bootstrap-signing": []byte("true"), + "usage-bootstrap-authentication": []byte("true"), + "auth-extra-groups": []byte("system:bootstrappers,system:bootstrappers:foo"), + }, + }, + } + for _, rt := range tests { + t.Run(rt.name, func(t *testing.T) { + actual := encodeTokenSecretData(rt.bt, refTime) + if !reflect.DeepEqual(actual, rt.data) { + t.Errorf( + "failed encodeTokenSecretData:\n\texpected: %v\n\t actual: %v", + rt.data, + actual, + ) + } + }) + } +} + +func mustParseDuration(durationStr string, t *testing.T) time.Duration { + d, err := time.ParseDuration(durationStr) + if err != nil { + t.Fatalf("couldn't parse duration %q: %v", durationStr, err) + } + return d +} + +func TestBootstrapTokenFromSecret(t *testing.T) { + var tests = []struct { + desc string + name string + data map[string][]byte + bt *BootstrapToken + expectedError bool + }{ + { + "minimum information", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + }, + false, + }, + { + "invalid token id", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdeF"), + "token-secret": []byte("abcdef0123456789"), + }, + nil, + true, + }, + { + "invalid secret naming", + "foo", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + }, + nil, + true, + }, + { + "invalid token secret", + 
"bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("ABCDEF0123456789"), + }, + nil, + true, + }, + { + "adds description", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "description": []byte("foo"), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Description: "foo", + }, + false, + }, + { + "adds expiration", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "expiration": []byte(refTime.Format(time.RFC3339)), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Expires: &metav1.Time{ + Time: refTime, + }, + }, + false, + }, + { + "invalid expiration", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "expiration": []byte("invalid date"), + }, + nil, + true, + }, + { + "adds usages", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "usage-bootstrap-signing": []byte("true"), + "usage-bootstrap-authentication": []byte("true"), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Usages: []string{"authentication", "signing"}, + }, + false, + }, + { + "should ignore usages that aren't set to true", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "usage-bootstrap-signing": []byte("true"), + "usage-bootstrap-authentication": []byte("true"), + "usage-bootstrap-foo": []byte("false"), + "usage-bootstrap-bar": []byte(""), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Usages: []string{"authentication", "signing"}, + }, + false, + }, + { + "adds groups", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "auth-extra-groups": []byte("system:bootstrappers,system:bootstrappers:foo"), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Groups: []string{"system:bootstrappers", "system:bootstrappers:foo"}, + }, + false, + }, + { + "all fields set", + "bootstrap-token-abcdef", + map[string][]byte{ + "token-id": []byte("abcdef"), + "token-secret": []byte("abcdef0123456789"), + "description": []byte("foo"), + "expiration": []byte(refTime.Format(time.RFC3339)), + "usage-bootstrap-signing": []byte("true"), + "usage-bootstrap-authentication": []byte("true"), + "auth-extra-groups": []byte("system:bootstrappers,system:bootstrappers:foo"), + }, + &BootstrapToken{ + Token: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, + Description: "foo", + Expires: &metav1.Time{ + Time: refTime, + }, + Usages: []string{"authentication", "signing"}, + Groups: []string{"system:bootstrappers", "system:bootstrappers:foo"}, + }, + false, + }, + } + for _, rt := range tests { + t.Run(rt.desc, func(t *testing.T) { + actual, err := BootstrapTokenFromSecret(&v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: rt.name, + Namespace: "kube-system", + }, + Type: v1.SecretType("bootstrap.kubernetes.io/token"), + Data: rt.data, + }) + if (err != nil) != rt.expectedError { + t.Errorf( + "failed BootstrapTokenFromSecret\n\texpected error: %t\n\t actual error: %v", 
+ rt.expectedError, + err, + ) + } else { + if actual == nil && rt.bt == nil { + // if both pointers are nil, it's okay, just continue + return + } + // If one of the pointers is defined but the other isn't, throw error. If both pointers are defined but unequal, throw error + if (actual == nil && rt.bt != nil) || (actual != nil && rt.bt == nil) || !reflect.DeepEqual(*actual, *rt.bt) { + t.Errorf( + "failed BootstrapTokenFromSecret\n\texpected: %s\n\t actual: %s", + jsonMarshal(rt.bt), + jsonMarshal(actual), + ) + } + } + }) + } +} + +func jsonMarshal(bt *BootstrapToken) string { + b, _ := json.Marshal(*bt) + return string(b) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenstring.go b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenstring.go new file mode 100644 index 00000000000..282d305afb8 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/bootstraptokenstring.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeadm holds the internal kubeadm API types +// Note: This file should be kept in sync with the similar one for the external API +// TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future +// (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. +package kubeadm + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + bootstraputil "k8s.io/cluster-bootstrap/token/util" +) + +// BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used +// for both validation of the practically of the API server from a joining node's point +// of view and as an authentication method for the node in the bootstrap phase of +// "kubeadm join". This token is and should be short-lived +type BootstrapTokenString struct { + ID string + Secret string +} + +// MarshalJSON implements the json.Marshaler interface. +func (bts BootstrapTokenString) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, bts.String())), nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. 
+func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { + // If the token is represented as "", just return quickly without an error + if len(b) == 0 { + return nil + } + + // Remove unnecessary " characters coming from the JSON parser + token := strings.Replace(string(b), `"`, ``, -1) + // Convert the string Token to a BootstrapTokenString object + newbts, err := NewBootstrapTokenString(token) + if err != nil { + return err + } + bts.ID = newbts.ID + bts.Secret = newbts.Secret + return nil +} + +// String returns the string representation of the BootstrapTokenString +func (bts BootstrapTokenString) String() string { + if len(bts.ID) > 0 && len(bts.Secret) > 0 { + return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) + } + return "" +} + +// NewBootstrapTokenString converts the given Bootstrap Token as a string +// to the BootstrapTokenString object used for serialization/deserialization +// and internal usage. It also automatically validates that the given token +// is of the right format +func NewBootstrapTokenString(token string) (*BootstrapTokenString, error) { + substrs := bootstraputil.BootstrapTokenRegexp.FindStringSubmatch(token) + // TODO: Add a constant for the 3 value here, and explain better why it's needed (other than because how the regexp parsin works) + if len(substrs) != 3 { + return nil, errors.Errorf("the bootstrap token %q was not of the form %q", token, bootstrapapi.BootstrapTokenPattern) + } + + return &BootstrapTokenString{ID: substrs[1], Secret: substrs[2]}, nil +} + +// NewBootstrapTokenStringFromIDAndSecret is a wrapper around NewBootstrapTokenString +// that allows the caller to specify the ID and Secret separately +func NewBootstrapTokenStringFromIDAndSecret(id, secret string) (*BootstrapTokenString, error) { + return NewBootstrapTokenString(bootstraputil.TokenFromIDAndSecret(id, secret)) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/types.go b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/types.go new file mode 100644 index 00000000000..ee0c547ad2b --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm/types.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster +// TODO: The BootstrapToken object should move out to either k8s.io/client-go or k8s.io/api in the future +// (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. +type BootstrapToken struct { + // Token is used for establishing bidirectional trust between nodes and control-planes. + // Used for joining nodes in the cluster. 
+ Token *BootstrapTokenString + // Description sets a human-friendly message why this token exists and what it's used + // for, so other administrators can know its purpose. + Description string + // TTL defines the time to live for this token. Defaults to 24h. + // Expires and TTL are mutually exclusive. + TTL *metav1.Duration + // Expires specifies the timestamp when this token expires. Defaults to being set + // dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + Expires *metav1.Time + // Usages describes the ways in which this token can be used. Can by default be used + // for establishing bidirectional trust, but that can be changed here. + Usages []string + // Groups specifies the extra groups that this token will authenticate as when/if + // used for authentication + Groups []string +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options/constant.go b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options/constant.go new file mode 100644 index 00000000000..0bee0ae3c50 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options/constant.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +const ( + // APIServerAdvertiseAddress flag sets the IP address the API Server will advertise it's listening on. Specify '0.0.0.0' to use the address of the default network interface. + APIServerAdvertiseAddress = "apiserver-advertise-address" + + // APIServerBindPort flag sets the port for the API Server to bind to. + APIServerBindPort = "apiserver-bind-port" + + // CertificatesDir flag sets the path where to save and read the certificates. + CertificatesDir = "cert-dir" + + // CfgPath flag sets the path to kubeadm config file. + CfgPath = "config" + + // DryRun flag instruct kubeadm to don't apply any changes; just output what would be done. + DryRun = "dry-run" + + // IgnorePreflightErrors sets the path a list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + IgnorePreflightErrors = "ignore-preflight-errors" + + // KubeconfigPath flag sets the kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations are searched for an existing KubeConfig file. + KubeconfigPath = "kubeconfig" + + // KubernetesVersion flag sets the Kubernetes version for the control plane. + KubernetesVersion = "kubernetes-version" + + // NodeCRISocket flag sets the CRI socket to connect to. + NodeCRISocket = "cri-socket" + + // NodeName flag sets the node name. 
+ NodeName = "node-name" + + // TokenStr flags sets both the discovery-token and the tls-bootstrap-token when those values are not provided + TokenStr = "token" + + // TokenDiscoveryCAHash flag instruct kubeadm to validate that the root CA public key matches this hash (for token-based discovery) + TokenDiscoveryCAHash = "discovery-token-ca-cert-hash" + + // TokenDiscoverySkipCAHash flag instruct kubeadm to skip CA hash verification (for token-based discovery) + TokenDiscoverySkipCAHash = "discovery-token-unsafe-skip-ca-verification" + + // ForceReset flag instruct kubeadm to reset the node without prompting for confirmation + ForceReset = "force" + + // NodeType flag sets the type of worker node to edge or cloud. + NodeType = "node-type" + + // Organizations flag sets the extra organizations of hub agent client certificate. + Organizations = "organizations" + + // NodeLabels flag sets the labels for worker node. + NodeLabels = "node-labels" + + // PauseImage flag sets the pause image for worker node. + PauseImage = "pause-image" + + // YurtHubImage flag sets the yurthub image for worker node. + YurtHubImage = "yurthub-image" +) diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/phase.go b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/phase.go new file mode 100644 index 00000000000..554635ac56c --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/phase.go @@ -0,0 +1,86 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Phase provides an implementation of a workflow phase that allows +// creation of new phases by simply instantiating a variable of this type. +type Phase struct { + // name of the phase. + // Phase name should be unique among peer phases (phases belonging to + // the same workflow or phases belonging to the same parent phase). + Name string + + // Aliases returns the aliases for the phase. + Aliases []string + + // Short description of the phase. + Short string + + // Long returns the long description of the phase. + Long string + + // Example returns the example for the phase. + Example string + + // Hidden define if the phase should be hidden in the workflow help. + // e.g. PrintFilesIfDryRunning phase in the kubeadm init workflow is candidate for being hidden to the users + Hidden bool + + // Phases defines a nested, ordered sequence of phases. + Phases []Phase + + // RunAllSiblings allows to assign to a phase the responsibility to + // run all the sibling phases + // Nb. phase marked as RunAllSiblings can not have Run functions + RunAllSiblings bool + + // Run defines a function implementing the phase action. + // It is recommended to implent type assertion, e.g. using golang type switch, + // for validating the RunData type. + Run func(data RunData) error + + // RunIf define a function that implements a condition that should be checked + // before executing the phase action. 
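For context on the flag-name constants in cmd/options/constant.go above: they keep flag spellings consistent across yurtctl subcommands. A hedged sketch of typical usage follows; the joinOptions struct, its fields, and the default values are illustrative assumptions, not code from this patch:

package join

import (
	"github.com/spf13/pflag"

	"github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/options"
)

// joinOptions is an illustrative holder for values bound to the shared flag names.
type joinOptions struct {
	nodeType     string
	yurthubImage string
	pauseImage   string
}

// addJoinFlags binds the shared flag-name constants to a pflag.FlagSet.
func addJoinFlags(fs *pflag.FlagSet, o *joinOptions) {
	fs.StringVar(&o.nodeType, options.NodeType, "edge", "Sets the node type to edge or cloud")
	fs.StringVar(&o.yurthubImage, options.YurtHubImage, "", "Sets the yurthub image for the worker node")
	fs.StringVar(&o.pauseImage, options.PauseImage, "", "Sets the pause image for the worker node")
}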
+ // If this function return nil, the phase action is always executed. + RunIf func(data RunData) (bool, error) + + // InheritFlags defines the list of flags that the cobra command generated for this phase should Inherit + // from local flags defined in the parent command / or additional flags defined in the phase runner. + // If the values is not set or empty, no flags will be assigned to the command + // Nb. global flags are automatically inherited by nested cobra command + InheritFlags []string + + // LocalFlags defines the list of flags that should be assigned to the cobra command generated + // for this phase. + // Nb. if two or phases have the same local flags, please consider using local flags in the parent command + // or additional flags defined in the phase runner. + LocalFlags *pflag.FlagSet + + // ArgsValidator defines the positional arg function to be used for validating args for this phase + // If not set a phase will adopt the args of the top level command. + ArgsValidator cobra.PositionalArgs +} + +// AppendPhase adds the given phase to the nested, ordered sequence of phases. +func (t *Phase) AppendPhase(phase Phase) { + t.Phases = append(t.Phases, phase) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner.go b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner.go new file mode 100644 index 00000000000..60cd89e980a --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner.go @@ -0,0 +1,485 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// phaseSeparator defines the separator to be used when concatenating nested +// phase names +const phaseSeparator = "/" + +// RunnerOptions defines the options supported during the execution of a +// kubeadm composable workflows +type RunnerOptions struct { + // FilterPhases defines the list of phases to be executed (if empty, all). + FilterPhases []string + + // SkipPhases defines the list of phases to be excluded by execution (if empty, none). + SkipPhases []string +} + +// RunData defines the data shared among all the phases included in the workflow, that is any type. +type RunData = interface{} + +// Runner implements management of composable kubeadm workflows. +type Runner struct { + // Options that regulate the runner behavior. + Options RunnerOptions + + // Phases composing the workflow to be managed by the runner. 
+ Phases []Phase + + // runDataInitializer defines a function that creates the runtime data shared + // among all the phases included in the workflow + runDataInitializer func(*cobra.Command, []string) (RunData, error) + + // runData is part of the internal state of the runner and it is used for implementing + // a singleton in the InitData methods (thus avoiding to initialize data + // more than one time) + runData RunData + + // runCmd is part of the internal state of the runner and it is used to track the + // command that will trigger the runner (only if the runner is BindToCommand). + runCmd *cobra.Command + + // cmdAdditionalFlags holds additional, shared flags that could be added to the subcommands generated + // for phases. Flags could be inherited from the parent command too or added directly to each phase + cmdAdditionalFlags *pflag.FlagSet + + // phaseRunners is part of the internal state of the runner and provides + // a list of wrappers to phases composing the workflow with contextual + // information supporting phase execution. + phaseRunners []*phaseRunner +} + +// phaseRunner provides a wrapper to a Phase with the addition of a set +// of contextual information derived by the workflow managed by the Runner. +// TODO: If we ever decide to get more sophisticated we can swap this type with a well defined dag or tree library. +type phaseRunner struct { + // Phase provide access to the phase implementation + Phase + + // provide access to the parent phase in the workflow managed by the Runner. + parent *phaseRunner + + // level define the level of nesting of this phase into the workflow managed by + // the Runner. + level int + + // selfPath contains all the elements of the path that identify the phase into + // the workflow managed by the Runner. + selfPath []string + + // generatedName is the full name of the phase, that corresponds to the absolute + // path of the phase in the workflow managed by the Runner. + generatedName string + + // use is the phase usage string that will be printed in the workflow help. + // It corresponds to the relative path of the phase in the workflow managed by the Runner. + use string +} + +// NewRunner return a new runner for composable kubeadm workflows. +func NewRunner() *Runner { + return &Runner{ + Phases: []Phase{}, + } +} + +// AppendPhase adds the given phase to the ordered sequence of phases managed by the runner. +func (e *Runner) AppendPhase(t Phase) { + e.Phases = append(e.Phases, t) +} + +// computePhaseRunFlags return a map defining which phase should be run and which not. +// PhaseRunFlags are computed according to RunnerOptions. +func (e *Runner) computePhaseRunFlags() (map[string]bool, error) { + // Initialize support data structure + phaseRunFlags := map[string]bool{} + phaseHierarchy := map[string][]string{} + e.visitAll(func(p *phaseRunner) error { + // Initialize phaseRunFlags assuming that all the phases should be run. + phaseRunFlags[p.generatedName] = true + + // Initialize phaseHierarchy for the current phase (the list of phases + // depending on the current phase + phaseHierarchy[p.generatedName] = []string{} + + // Register current phase as part of its own parent hierarchy + parent := p.parent + for parent != nil { + phaseHierarchy[parent.generatedName] = append(phaseHierarchy[parent.generatedName], p.generatedName) + parent = parent.parent + } + return nil + }) + + // If a filter option is specified, set all phaseRunFlags to false except for + // the phases included in the filter and their hierarchy of nested phases. 
+ if len(e.Options.FilterPhases) > 0 { + for i := range phaseRunFlags { + phaseRunFlags[i] = false + } + for _, f := range e.Options.FilterPhases { + if _, ok := phaseRunFlags[f]; !ok { + return phaseRunFlags, errors.Errorf("invalid phase name: %s", f) + } + phaseRunFlags[f] = true + for _, c := range phaseHierarchy[f] { + phaseRunFlags[c] = true + } + } + } + + // If a phase skip option is specified, set the corresponding phaseRunFlags + // to false and apply the same change to the underlying hierarchy + for _, f := range e.Options.SkipPhases { + if _, ok := phaseRunFlags[f]; !ok { + return phaseRunFlags, errors.Errorf("invalid phase name: %s", f) + } + phaseRunFlags[f] = false + for _, c := range phaseHierarchy[f] { + phaseRunFlags[c] = false + } + } + + return phaseRunFlags, nil +} + +// SetDataInitializer allows to setup a function that initialize the runtime data shared +// among all the phases included in the workflow. +// The method will receive in input the cmd that triggers the Runner (only if the runner is BindToCommand) +func (e *Runner) SetDataInitializer(builder func(cmd *cobra.Command, args []string) (RunData, error)) { + e.runDataInitializer = builder +} + +// InitData triggers the creation of runtime data shared among all the phases included in the workflow. +// This action can be executed explicitly out, when it is necessary to get the RunData +// before actually executing Run, or implicitly when invoking Run. +func (e *Runner) InitData(args []string) (RunData, error) { + if e.runData == nil && e.runDataInitializer != nil { + var err error + if e.runData, err = e.runDataInitializer(e.runCmd, args); err != nil { + return nil, err + } + } + + return e.runData, nil +} + +// Run the kubeadm composable kubeadm workflows. +func (e *Runner) Run(args []string) error { + e.prepareForExecution() + + // determine which phase should be run according to RunnerOptions + phaseRunFlags, err := e.computePhaseRunFlags() + if err != nil { + return err + } + + // builds the runner data + var data RunData + if data, err = e.InitData(args); err != nil { + return err + } + + err = e.visitAll(func(p *phaseRunner) error { + // if the phase should not be run, skip the phase. + if run, ok := phaseRunFlags[p.generatedName]; !run || !ok { + return nil + } + + // Errors if phases that are meant to create special subcommands only + // are wrongly assigned Run Methods + if p.RunAllSiblings && (p.RunIf != nil || p.Run != nil) { + return errors.Wrapf(err, "phase marked as RunAllSiblings can not have Run functions %s", p.generatedName) + } + + // If the phase defines a condition to be checked before executing the phase action. + if p.RunIf != nil { + // Check the condition and returns if the condition isn't satisfied (or fails) + ok, err := p.RunIf(data) + if err != nil { + return errors.Wrapf(err, "error execution run condition for phase %s", p.generatedName) + } + + if !ok { + return nil + } + } + + // Runs the phase action (if defined) + if p.Run != nil { + if err := p.Run(data); err != nil { + return errors.Wrapf(err, "error execution phase %s", p.generatedName) + } + } + + return nil + }) + + return err +} + +// Help returns text with the list of phases included in the workflow. 
+func (e *Runner) Help(cmdUse string) string { + e.prepareForExecution() + + // computes the max length of for each phase use line + maxLength := 0 + e.visitAll(func(p *phaseRunner) error { + if !p.Hidden && !p.RunAllSiblings { + length := len(p.use) + if maxLength < length { + maxLength = length + } + } + return nil + }) + + // prints the list of phases indented by level and formatted using the maxlength + // the list is enclosed in a mardown code block for ensuring better readability in the public web site + line := fmt.Sprintf("The %q command executes the following phases:\n", cmdUse) + line += "```\n" + offset := 2 + e.visitAll(func(p *phaseRunner) error { + if !p.Hidden && !p.RunAllSiblings { + padding := maxLength - len(p.use) + offset + line += strings.Repeat(" ", offset*p.level) // indentation + line += p.use // name + aliases + line += strings.Repeat(" ", padding) // padding right up to max length (+ offset for spacing) + line += p.Short // phase short description + line += "\n" + } + + return nil + }) + line += "```" + return line +} + +// SetAdditionalFlags allows to define flags to be added +// to the subcommands generated for each phase (but not existing in the parent command). +// Please note that this command needs to be done before BindToCommand. +// Nb. if a flag is used only by one phase, please consider using phase LocalFlags. +func (e *Runner) SetAdditionalFlags(fn func(*pflag.FlagSet)) { + // creates a new NewFlagSet + e.cmdAdditionalFlags = pflag.NewFlagSet("phaseAdditionalFlags", pflag.ContinueOnError) + // invokes the function that sets additional flags + fn(e.cmdAdditionalFlags) +} + +// BindToCommand bind the Runner to a cobra command by altering +// command help, adding phase related flags and by adding phases subcommands +// Please note that this command needs to be done once all the phases are added to the Runner. +func (e *Runner) BindToCommand(cmd *cobra.Command) { + // keep track of the command triggering the runner + e.runCmd = cmd + + // return early if no phases were added + if len(e.Phases) == 0 { + return + } + + e.prepareForExecution() + + // adds the phases subcommand + phaseCommand := &cobra.Command{ + Use: "phase", + Short: fmt.Sprintf("Use this command to invoke single phase of the %s workflow", cmd.Name()), + } + + cmd.AddCommand(phaseCommand) + + // generate all the nested subcommands for invoking single phases + subcommands := map[string]*cobra.Command{} + e.visitAll(func(p *phaseRunner) error { + // skip hidden phases + if p.Hidden { + return nil + } + + // initialize phase selector + phaseSelector := p.generatedName + + // if requested, set the phase to run all the sibling phases + if p.RunAllSiblings { + phaseSelector = p.parent.generatedName + } + + // creates phase subcommand + phaseCmd := &cobra.Command{ + Use: strings.ToLower(p.Name), + Short: p.Short, + Long: p.Long, + Example: p.Example, + Aliases: p.Aliases, + RunE: func(cmd *cobra.Command, args []string) error { + // if the phase has subphases, print the help and exits + if len(p.Phases) > 0 { + cmd.Help() + return nil + } + + // overrides the command triggering the Runner using the phaseCmd + e.runCmd = cmd + e.Options.FilterPhases = []string{phaseSelector} + return e.Run(args) + }, + } + + // makes the new command inherits local flags from the parent command + // Nb. 
global flags will be inherited automatically + inheritsFlags(cmd.Flags(), phaseCmd.Flags(), p.InheritFlags) + + // makes the new command inherits additional flags for phases + if e.cmdAdditionalFlags != nil { + inheritsFlags(e.cmdAdditionalFlags, phaseCmd.Flags(), p.InheritFlags) + } + + // If defined, added phase local flags + if p.LocalFlags != nil { + p.LocalFlags.VisitAll(func(f *pflag.Flag) { + phaseCmd.Flags().AddFlag(f) + }) + } + + // if this phase has children (not a leaf) it doesn't accept any args + if len(p.Phases) > 0 { + phaseCmd.Args = cobra.NoArgs + } else { + if p.ArgsValidator == nil { + phaseCmd.Args = cmd.Args + } else { + phaseCmd.Args = p.ArgsValidator + } + } + + // adds the command to parent + if p.level == 0 { + phaseCommand.AddCommand(phaseCmd) + } else { + subcommands[p.parent.generatedName].AddCommand(phaseCmd) + } + + subcommands[p.generatedName] = phaseCmd + return nil + }) + + // alters the command description to show available phases + if cmd.Long != "" { + cmd.Long = fmt.Sprintf("%s\n\n%s\n", cmd.Long, e.Help(cmd.Use)) + } else { + cmd.Long = fmt.Sprintf("%s\n\n%s\n", cmd.Short, e.Help(cmd.Use)) + } + + // adds phase related flags to the main command + cmd.Flags().StringSliceVar(&e.Options.SkipPhases, "skip-phases", nil, "List of phases to be skipped") +} + +func inheritsFlags(sourceFlags, targetFlags *pflag.FlagSet, cmdFlags []string) { + // If the list of flag to be inherited from the parent command is not defined, no flag is added + if cmdFlags == nil { + return + } + + // add all the flags to be inherited to the target flagSet + sourceFlags.VisitAll(func(f *pflag.Flag) { + for _, c := range cmdFlags { + if f.Name == c { + targetFlags.AddFlag(f) + } + } + }) +} + +// visitAll provides a utility method for visiting all the phases in the workflow +// in the execution order and executing a func on each phase. +// Nested phase are visited immediately after their parent phase. +func (e *Runner) visitAll(fn func(*phaseRunner) error) error { + for _, currentRunner := range e.phaseRunners { + if err := fn(currentRunner); err != nil { + return err + } + } + return nil +} + +// prepareForExecution initialize the internal state of the Runner (the list of phaseRunner). +func (e *Runner) prepareForExecution() { + e.phaseRunners = []*phaseRunner{} + var parentRunner *phaseRunner + for _, phase := range e.Phases { + // skips phases that are meant to create special subcommands only + if phase.RunAllSiblings { + continue + } + + // add phases to the execution list + addPhaseRunner(e, parentRunner, phase) + } +} + +// addPhaseRunner adds the phaseRunner for a given phase to the phaseRunners list +func addPhaseRunner(e *Runner, parentRunner *phaseRunner, phase Phase) { + // computes contextual information derived by the workflow managed by the Runner. + use := cleanName(phase.Name) + generatedName := use + selfPath := []string{generatedName} + + if parentRunner != nil { + generatedName = strings.Join([]string{parentRunner.generatedName, generatedName}, phaseSeparator) + use = fmt.Sprintf("%s%s", phaseSeparator, use) + selfPath = append(parentRunner.selfPath, selfPath...) 
+ } + + // creates the phaseRunner + currentRunner := &phaseRunner{ + Phase: phase, + parent: parentRunner, + level: len(selfPath) - 1, + selfPath: selfPath, + generatedName: generatedName, + use: use, + } + + // adds to the phaseRunners list + e.phaseRunners = append(e.phaseRunners, currentRunner) + + // iterate for the nested, ordered list of phases, thus storing + // phases in the expected executing order (child phase are stored immediately after their parent phase). + for _, childPhase := range phase.Phases { + addPhaseRunner(e, currentRunner, childPhase) + } +} + +// cleanName makes phase name suitable for the runner help, by lowercasing the name +// and removing args descriptors, if any +func cleanName(name string) string { + ret := strings.ToLower(name) + if pos := strings.Index(ret, " "); pos != -1 { + ret = ret[:pos] + } + return ret +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner_test.go new file mode 100644 index 00000000000..ef546fdd367 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow/runner_test.go @@ -0,0 +1,625 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func phaseBuilder(name string, phases ...Phase) Phase { + return Phase{ + Name: name, + Short: fmt.Sprintf("long description for %s ...", name), + Phases: phases, + } +} + +func TestComputePhaseRunFlags(t *testing.T) { + + var usecases = []struct { + name string + options RunnerOptions + expected map[string]bool + expectedError bool + }{ + { + name: "no options > all phases", + options: RunnerOptions{}, + expected: map[string]bool{"foo": true, "foo/bar": true, "foo/baz": true, "qux": true}, + }, + { + name: "options can filter phases", + options: RunnerOptions{FilterPhases: []string{"foo/baz", "qux"}}, + expected: map[string]bool{"foo": false, "foo/bar": false, "foo/baz": true, "qux": true}, + }, + { + name: "options can filter phases - hierarchy is considered", + options: RunnerOptions{FilterPhases: []string{"foo"}}, + expected: map[string]bool{"foo": true, "foo/bar": true, "foo/baz": true, "qux": false}, + }, + { + name: "options can skip phases", + options: RunnerOptions{SkipPhases: []string{"foo/bar", "qux"}}, + expected: map[string]bool{"foo": true, "foo/bar": false, "foo/baz": true, "qux": false}, + }, + { + name: "options can skip phases - hierarchy is considered", + options: RunnerOptions{SkipPhases: []string{"foo"}}, + expected: map[string]bool{"foo": false, "foo/bar": false, "foo/baz": false, "qux": true}, + }, + { + name: "skip options have higher precedence than filter options", + options: RunnerOptions{ + FilterPhases: []string{"foo"}, // "foo", "foo/bar", "foo/baz" true + SkipPhases: []string{"foo/bar"}, // "foo/bar" false + }, + expected: map[string]bool{"foo": true, "foo/bar": false, 
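For reference, a minimal sketch of how the workflow Runner above is typically wired into a cobra command; the joinData type, the phase body, and the node name are illustrative assumptions rather than code from this patch:

package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/cmd/phases/workflow"
)

// joinData is an illustrative RunData implementation shared by all phases.
type joinData struct {
	nodeName string
}

func main() {
	runner := workflow.NewRunner()

	// Phases execute in the order they are appended; nested phases run right after their parent.
	runner.AppendPhase(workflow.Phase{
		Name:  "preflight",
		Short: "Run pre-flight checks",
		Run: func(data workflow.RunData) error {
			d, ok := data.(*joinData)
			if !ok {
				return fmt.Errorf("preflight phase invoked with an invalid data struct")
			}
			fmt.Println("running preflight checks for", d.nodeName)
			return nil
		},
	})

	cmd := &cobra.Command{
		Use: "join",
		RunE: func(cmd *cobra.Command, args []string) error {
			return runner.Run(args)
		},
	}

	// The data initializer builds the RunData shared by every phase in the workflow.
	runner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) {
		return &joinData{nodeName: "edge-node-0"}, nil
	})

	// BindToCommand adds the "phase" subcommand and the --skip-phases flag to cmd.
	runner.BindToCommand(cmd)

	_ = cmd.Execute()
}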
"foo/baz": true, "qux": false}, + }, + { + name: "invalid filter option", + options: RunnerOptions{FilterPhases: []string{"invalid"}}, + expectedError: true, + }, + { + name: "invalid skip option", + options: RunnerOptions{SkipPhases: []string{"invalid"}}, + expectedError: true, + }, + } + for _, u := range usecases { + t.Run(u.name, func(t *testing.T) { + var w = Runner{ + Phases: []Phase{ + phaseBuilder("foo", + phaseBuilder("bar"), + phaseBuilder("baz"), + ), + phaseBuilder("qux"), + }, + } + + w.prepareForExecution() + w.Options = u.options + actual, err := w.computePhaseRunFlags() + if (err != nil) != u.expectedError { + t.Errorf("Unexpected error: %v", err) + } + if err != nil { + return + } + if !reflect.DeepEqual(actual, u.expected) { + t.Errorf("\nactual:\n\t%v\nexpected:\n\t%v\n", actual, u.expected) + } + }) + } +} + +func phaseBuilder1(name string, runIf func(data RunData) (bool, error), phases ...Phase) Phase { + return Phase{ + Name: name, + Short: fmt.Sprintf("long description for %s ...", name), + Phases: phases, + Run: runBuilder(name), + RunIf: runIf, + } +} + +var callstack []string + +func runBuilder(name string) func(data RunData) error { + return func(data RunData) error { + callstack = append(callstack, name) + return nil + } +} + +func runConditionTrue(data RunData) (bool, error) { + return true, nil +} + +func runConditionFalse(data RunData) (bool, error) { + return false, nil +} + +func TestRunOrderAndConditions(t *testing.T) { + var w = Runner{ + Phases: []Phase{ + phaseBuilder1("foo", nil, + phaseBuilder1("bar", runConditionTrue), + phaseBuilder1("baz", runConditionFalse), + ), + phaseBuilder1("qux", runConditionTrue), + }, + } + + var usecases = []struct { + name string + options RunnerOptions + expectedOrder []string + }{ + { + name: "Run respect runCondition", + expectedOrder: []string{"foo", "bar", "qux"}, + }, + { + name: "Run takes options into account", + options: RunnerOptions{FilterPhases: []string{"foo"}, SkipPhases: []string{"foo/baz"}}, + expectedOrder: []string{"foo", "bar"}, + }, + } + for _, u := range usecases { + t.Run(u.name, func(t *testing.T) { + callstack = []string{} + w.Options = u.options + err := w.Run([]string{}) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(callstack, u.expectedOrder) { + t.Errorf("\ncallstack:\n\t%v\nexpected:\n\t%v\n", callstack, u.expectedOrder) + } + }) + } +} + +func phaseBuilder2(name string, runIf func(data RunData) (bool, error), run func(data RunData) error, phases ...Phase) Phase { + return Phase{ + Name: name, + Short: fmt.Sprintf("long description for %s ...", name), + Phases: phases, + Run: run, + RunIf: runIf, + } +} + +func runPass(data RunData) error { + return nil +} + +func runFails(data RunData) error { + return errors.New("run fails") +} + +func runConditionPass(data RunData) (bool, error) { + return true, nil +} + +func runConditionFails(data RunData) (bool, error) { + return false, errors.New("run condition fails") +} + +func TestRunHandleErrors(t *testing.T) { + var w = Runner{ + Phases: []Phase{ + phaseBuilder2("foo", runConditionPass, runPass), + phaseBuilder2("bar", runConditionPass, runFails), + phaseBuilder2("baz", runConditionFails, runPass), + }, + } + + var usecases = []struct { + name string + options RunnerOptions + expectedError bool + }{ + { + name: "no errors", + options: RunnerOptions{FilterPhases: []string{"foo"}}, + }, + { + name: "run fails", + options: RunnerOptions{FilterPhases: []string{"bar"}}, + expectedError: true, + }, + { + name: 
"run condition fails", + options: RunnerOptions{FilterPhases: []string{"baz"}}, + expectedError: true, + }, + } + for _, u := range usecases { + t.Run(u.name, func(t *testing.T) { + w.Options = u.options + err := w.Run([]string{}) + if (err != nil) != u.expectedError { + t.Errorf("Unexpected error: %v", err) + } + }) + } +} + +func phaseBuilder3(name string, hidden bool, phases ...Phase) Phase { + return Phase{ + Name: name, + Short: fmt.Sprintf("long description for %s ...", name), + Phases: phases, + Hidden: hidden, + } +} + +func TestHelp(t *testing.T) { + var w = Runner{ + Phases: []Phase{ + phaseBuilder3("foo", false, + phaseBuilder3("bar [arg]", false), + phaseBuilder3("baz", true), + ), + phaseBuilder3("qux", false), + }, + } + + expected := "The \"myCommand\" command executes the following phases:\n" + + "```\n" + + "foo long description for foo ...\n" + + " /bar long description for bar [arg] ...\n" + + "qux long description for qux ...\n" + + "```" + + actual := w.Help("myCommand") + if !reflect.DeepEqual(actual, expected) { + t.Errorf("\nactual:\n\t%v\nexpected:\n\t%v\n", actual, expected) + } +} + +func phaseBuilder4(name string, cmdFlags []string, phases ...Phase) Phase { + return Phase{ + Name: name, + Phases: phases, + InheritFlags: cmdFlags, + } +} + +func phaseBuilder5(name string, flags *pflag.FlagSet) Phase { + return Phase{ + Name: name, + LocalFlags: flags, + } +} + +type argTest struct { + args cobra.PositionalArgs + pass []string + fail []string +} + +func phaseBuilder6(name string, args cobra.PositionalArgs, phases ...Phase) Phase { + return Phase{ + Name: name, + Short: fmt.Sprintf("long description for %s ...", name), + Phases: phases, + ArgsValidator: args, + } +} + +// customArgs is a custom cobra.PositionArgs function +func customArgs(cmd *cobra.Command, args []string) error { + for _, a := range args { + if a != "qux" { + return fmt.Errorf("arg %s does not equal qux", a) + } + } + return nil +} + +func TestBindToCommandArgRequirements(t *testing.T) { + + // because cobra.ExactArgs(1) == cobra.ExactArgs(3), it is needed + // to run test argument sets that both pass and fail to ensure the correct function was set. 
+ var usecases = []struct { + name string + runner Runner + testCases map[string]argTest + cmd *cobra.Command + }{ + { + name: "leaf command, no defined args, follow parent", + runner: Runner{ + Phases: []Phase{phaseBuilder("foo")}, + }, + testCases: map[string]argTest{ + "phase foo": { + pass: []string{"one", "two", "three"}, + fail: []string{"one", "two"}, + args: cobra.ExactArgs(3), + }, + }, + cmd: &cobra.Command{ + Use: "init", + Args: cobra.ExactArgs(3), + }, + }, + { + name: "container cmd expect none, custom arg check for leaf", + runner: Runner{ + Phases: []Phase{phaseBuilder6("foo", cobra.NoArgs, + phaseBuilder6("bar", cobra.ExactArgs(1)), + phaseBuilder6("baz", customArgs), + )}, + }, + testCases: map[string]argTest{ + "phase foo": { + pass: []string{}, + fail: []string{"one"}, + args: cobra.NoArgs, + }, + "phase foo bar": { + pass: []string{"one"}, + fail: []string{"one", "two"}, + args: cobra.ExactArgs(1), + }, + "phase foo baz": { + pass: []string{"qux"}, + fail: []string{"one"}, + args: customArgs, + }, + }, + cmd: &cobra.Command{ + Use: "init", + Args: cobra.NoArgs, + }, + }, + } + + for _, rt := range usecases { + t.Run(rt.name, func(t *testing.T) { + + rt.runner.BindToCommand(rt.cmd) + + // Checks that cmd gets a new phase subcommand + phaseCmd := getCmd(rt.cmd, "phase") + if phaseCmd == nil { + t.Error("cmd didn't have phase subcommand\n") + return + } + + for c, args := range rt.testCases { + + cCmd := getCmd(rt.cmd, c) + if cCmd == nil { + t.Errorf("cmd didn't have %s subcommand\n", c) + continue + } + + // Ensure it is the expected function + if reflect.ValueOf(cCmd.Args).Pointer() != reflect.ValueOf(args.args).Pointer() { + t.Error("The function poiners where not equal.") + } + + // Test passing argument set + err := cCmd.Args(cCmd, args.pass) + + if err != nil { + t.Errorf("command %s should validate the args: %v\n %v", cCmd.Name(), args.pass, err) + } + + // Test failing argument set + err = cCmd.Args(cCmd, args.fail) + + if err == nil { + t.Errorf("command %s should fail to validate the args: %v\n %v", cCmd.Name(), args.pass, err) + } + } + + }) + } +} + +func TestBindToCommand(t *testing.T) { + + var dummy string + localFlags := pflag.NewFlagSet("dummy", pflag.ContinueOnError) + localFlags.StringVarP(&dummy, "flag4", "d", "d", "d") + + var usecases = []struct { + name string + runner Runner + expectedCmdAndFlags map[string][]string + setAdditionalFlags func(*pflag.FlagSet) + }{ + { + name: "when there are no phases, cmd should be left untouched", + runner: Runner{}, + }, + { + name: "phases should not inherits any parent flags by default", + runner: Runner{ + Phases: []Phase{phaseBuilder4("foo", nil)}, + }, + expectedCmdAndFlags: map[string][]string{ + "phase foo": {}, + }, + }, + { + name: "phases should be allowed to select parent flags to inherits", + runner: Runner{ + Phases: []Phase{phaseBuilder4("foo", []string{"flag1"})}, + }, + expectedCmdAndFlags: map[string][]string{ + "phase foo": {"flag1"}, //not "flag2" + }, + }, + { + name: "it should be possible to apply additional flags to all phases", + runner: Runner{ + Phases: []Phase{ + phaseBuilder4("foo", []string{"flag3"}), + phaseBuilder4("bar", []string{"flag1", "flag2", "flag3"}), + phaseBuilder4("baz", []string{"flag1"}), //test if additional flags are filtered too + }, + }, + setAdditionalFlags: func(flags *pflag.FlagSet) { + var dummy3 string + flags.StringVarP(&dummy3, "flag3", "c", "c", "c") + }, + expectedCmdAndFlags: map[string][]string{ + "phase foo": {"flag3"}, + "phase bar": {"flag1", "flag2", 
"flag3"}, + "phase baz": {"flag1"}, + }, + }, + { + name: "it should be possible to apply custom flags to single phases", + runner: Runner{ + Phases: []Phase{phaseBuilder5("foo", localFlags)}, + }, + expectedCmdAndFlags: map[string][]string{ + "phase foo": {"flag4"}, + }, + }, + { + name: "all the above applies to nested phases too", + runner: Runner{ + Phases: []Phase{ + phaseBuilder4("foo", []string{"flag3"}, + phaseBuilder4("bar", []string{"flag1", "flag2", "flag3"}), + phaseBuilder4("baz", []string{"flag1"}), //test if additional flags are filtered too + phaseBuilder5("qux", localFlags), + ), + }, + }, + setAdditionalFlags: func(flags *pflag.FlagSet) { + var dummy3 string + flags.StringVarP(&dummy3, "flag3", "c", "c", "c") + }, + expectedCmdAndFlags: map[string][]string{ + "phase foo": {"flag3"}, + "phase foo bar": {"flag1", "flag2", "flag3"}, + "phase foo baz": {"flag1"}, + "phase foo qux": {"flag4"}, + }, + }, + } + for _, rt := range usecases { + t.Run(rt.name, func(t *testing.T) { + + var dummy1, dummy2 string + cmd := &cobra.Command{ + Use: "init", + } + + cmd.Flags().StringVarP(&dummy1, "flag1", "a", "a", "a") + cmd.Flags().StringVarP(&dummy2, "flag2", "b", "b", "b") + + if rt.setAdditionalFlags != nil { + rt.runner.SetAdditionalFlags(rt.setAdditionalFlags) + } + + rt.runner.BindToCommand(cmd) + + // in case of no phases, checks that cmd is untouched + if len(rt.runner.Phases) == 0 { + if cmd.Long != "" { + t.Error("cmd.Long is set while it should be leaved untouched\n") + } + + if cmd.Flags().Lookup("skip-phases") != nil { + t.Error("cmd has skip-phases flag while it should not\n") + } + + if getCmd(cmd, "phase") != nil { + t.Error("cmd has phase subcommand while it should not\n") + } + + return + } + + // Otherwise, if there are phases + + // Checks that cmd get the description set and the skip-phases flags + if cmd.Long == "" { + t.Error("cmd.Long not set\n") + } + + if cmd.Flags().Lookup("skip-phases") == nil { + t.Error("cmd didn't have skip-phases flag\n") + } + + // Checks that cmd gets a new phase subcommand (without local flags) + phaseCmd := getCmd(cmd, "phase") + if phaseCmd == nil { + t.Error("cmd didn't have phase subcommand\n") + return + } + if err := cmdHasFlags(phaseCmd); err != nil { + t.Errorf("command phase didn't have expected flags: %v\n", err) + } + + // Checks that cmd subcommand gets subcommand for phases (without flags properly sets) + for c, flags := range rt.expectedCmdAndFlags { + + cCmd := getCmd(cmd, c) + if cCmd == nil { + t.Errorf("cmd didn't have %s subcommand\n", c) + continue + } + + if err := cmdHasFlags(cCmd, flags...); err != nil { + t.Errorf("command %s didn't have expected flags: %v\n", c, err) + } + } + + }) + } +} + +func getCmd(parent *cobra.Command, nestedName string) *cobra.Command { + names := strings.Split(nestedName, " ") + for i, n := range names { + for _, c := range parent.Commands() { + if c.Name() == n { + if i == len(names)-1 { + return c + } + parent = c + } + } + } + + return nil +} + +func cmdHasFlags(cmd *cobra.Command, expectedFlags ...string) error { + flags := []string{} + cmd.Flags().VisitAll(func(f *pflag.Flag) { + flags = append(flags, f.Name) + }) + + for _, e := range expectedFlags { + found := false + for _, f := range flags { + if f == e { + found = true + } + } + if !found { + return errors.Errorf("flag %q does not exists in %s", e, flags) + } + } + + if len(flags) != len(expectedFlags) { + return errors.Errorf("expected flags %s, got %s", expectedFlags, flags) + } + + return nil +} diff --git 
a/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants.go b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants.go new file mode 100644 index 00000000000..3e357f8bbe5 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants.go @@ -0,0 +1,151 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constants + +import ( + "fmt" + "path/filepath" + "time" + + "k8s.io/apimachinery/pkg/util/version" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" +) + +const ( + // KubernetesDir is the directory Kubernetes owns for storing various configuration files + KubernetesDir = "/etc/kubernetes" + // ManifestsSubDirName defines directory name to store manifests + ManifestsSubDirName = "manifests" + + // CACertAndKeyBaseName defines certificate authority base name + CACertAndKeyBaseName = "ca" + // CACertName defines certificate name + CACertName = "ca.crt" + + // AdminKubeConfigFileName defines name for the kubeconfig aimed to be used by the superuser/admin of the cluster + AdminKubeConfigFileName = "admin.conf" + + // KubeletKubeConfigFileName defines the file name for the kubeconfig that the control-plane kubelet will use for talking + // to the API server + KubeletKubeConfigFileName = "kubelet.conf" + // ControllerManagerKubeConfigFileName defines the file name for the controller manager's kubeconfig file + ControllerManagerKubeConfigFileName = "controller-manager.conf" + // SchedulerKubeConfigFileName defines the file name for the scheduler's kubeconfig file + SchedulerKubeConfigFileName = "scheduler.conf" + + // Some well-known users and groups in the core Kubernetes authorization system + + // APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation + APICallRetryInterval = 500 * time.Millisecond + // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the control-plane when doing discovery + DiscoveryRetryInterval = 5 * time.Second + // PatchNodeTimeout specifies how long kubeadm should wait for applying the label and taint on the control-plane before timing out + PatchNodeTimeout = 2 * time.Minute + // TLSBootstrapTimeout specifies how long kubeadm should wait for the kubelet to perform the TLS Bootstrap + TLSBootstrapTimeout = 5 * time.Minute + // TLSBootstrapRetryInterval specifies how long kubeadm should wait before retrying the TLS Bootstrap check + TLSBootstrapRetryInterval = 5 * time.Second + // APICallWithWriteTimeout specifies how long kubeadm should wait for api calls with at least one write + APICallWithWriteTimeout = 40 * time.Second + // APICallWithReadTimeout specifies how long kubeadm should wait for api calls with only reads + APICallWithReadTimeout = 15 * time.Second + // PullImageRetry specifies how many times ContainerRuntime retries when pulling image failed + PullImageRetry = 5 + + // AnnotationKubeadmCRISocket specifies the annotation kubeadm uses to preserve the crisocket information given to kubeadm at + // init/join time for use later. 
kubeadm annotates the node object with this information + AnnotationKubeadmCRISocket = "kubeadm.alpha.kubernetes.io/cri-socket" + + // KubeadmConfigConfigMap specifies in what ConfigMap in the kube-system namespace the `kubeadm init` configuration should be stored + KubeadmConfigConfigMap = "kubeadm-config" + + // ClusterConfigurationConfigMapKey specifies in what ConfigMap key the cluster configuration should be stored + ClusterConfigurationConfigMapKey = "ClusterConfiguration" + + // KubeletBaseConfigurationConfigMapPrefix specifies in what ConfigMap in the kube-system namespace the initial remote configuration of kubelet should be stored + KubeletBaseConfigurationConfigMapPrefix = "kubelet-config-" + + // KubeletBaseConfigurationConfigMapKey specifies in what ConfigMap key the initial remote configuration of kubelet should be stored + KubeletBaseConfigurationConfigMapKey = "kubelet" + + // KubeletRunDirectory specifies the directory where the kubelet runtime information is stored. + KubeletRunDirectory = "/var/lib/kubelet" + + // KubeletConfigurationFileName specifies the file name on the node which stores initial remote configuration of kubelet + // This file should exist under KubeletRunDirectory + KubeletConfigurationFileName = "config.yaml" + + // KubeletEnvFileName is a file "kubeadm init" writes at runtime. Using that interface, kubeadm can customize certain + // kubelet flags conditionally based on the environment at runtime. Also, parameters given to the configuration file + // might be passed through this file. "kubeadm init" writes one variable, with the name ${KubeletEnvFileVariableName}. + // This file should exist under KubeletRunDirectory + KubeletEnvFileName = "kubeadm-flags.env" + + // KubeletEnvFileVariableName specifies the shell script variable name "kubeadm init" should write a value to in KubeletEnvFile + KubeletEnvFileVariableName = "KUBELET_KUBEADM_ARGS" + + // KubeletHealthzPort is the port of the kubelet healthz endpoint + KubeletHealthzPort = 10248 + + // NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in + NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token" + + // KubeAPIServer defines variable used internally when referring to kube-apiserver component + KubeAPIServer = "kube-apiserver" + // KubeControllerManager defines variable used internally when referring to kube-controller-manager component + KubeControllerManager = "kube-controller-manager" + // KubeScheduler defines variable used internally when referring to kube-scheduler component + KubeScheduler = "kube-scheduler" + + // KubeletPort is the default port for the kubelet server on each host machine. + // May be overridden by a flag at startup. 
+ KubeletPort = 10250 +) + +var ( + // DefaultTokenUsages specifies the default functions a token will get + DefaultTokenUsages = bootstrapapi.KnownTokenUsages + + // DefaultTokenGroups specifies the default groups that this token will authenticate as when used for authentication + DefaultTokenGroups = []string{NodeBootstrapTokenAuthGroup} + + // ControlPlaneComponents defines the control-plane component names + ControlPlaneComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} + + // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports + MinimumKubeletVersion = version.MustParseSemantic("v1.17.0") +) + +// GetStaticPodDirectory returns the location on the disk where the Static Pod should be present +func GetStaticPodDirectory() string { + return filepath.Join(KubernetesDir, ManifestsSubDirName) +} + +// GetStaticPodFilepath returns the location on the disk where the Static Pod should be present +func GetStaticPodFilepath(componentName, manifestsDir string) string { + return filepath.Join(manifestsDir, componentName+".yaml") +} + +// GetAdminKubeConfigPath returns the location on the disk where admin kubeconfig is located by default +func GetAdminKubeConfigPath() string { + return filepath.Join(KubernetesDir, AdminKubeConfigFileName) +} + +// GetKubeletConfigMapName returns the right ConfigMap name for the right branch of k8s +func GetKubeletConfigMapName(k8sVersion *version.Version) string { + return fmt.Sprintf("%s%d.%d", KubeletBaseConfigurationConfigMapPrefix, k8sVersion.Major(), k8sVersion.Minor()) +} diff --git a/pkg/yurtctl/cmd/join/phases/type.go b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go similarity index 70% rename from pkg/yurtctl/cmd/join/phases/type.go rename to pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go index d9ad6c5dcdb..16ff72d5556 100644 --- a/pkg/yurtctl/cmd/join/phases/type.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go @@ -1,5 +1,7 @@ +// +build !windows + /* -Copyright 2021 The OpenYurt Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +16,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package phases +package constants -import ( - joinphases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/join" +const ( + // DefaultDockerCRISocket defines the default Docker CRI socket + DefaultDockerCRISocket = "/var/run/dockershim.sock" ) - -type YurtJoinData interface { - joinphases.JoinData - NodeType() string - YurtHubImage() string -} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go new file mode 100644 index 00000000000..6daae0a1fff --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go @@ -0,0 +1,24 @@ +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constants + +const ( + // DefaultDockerCRISocket defines the default Docker CRI socket + DefaultDockerCRISocket = "npipe:////./pipe/docker_engine" +) diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/discovery/token/token.go b/pkg/yurtctl/kubernetes/kubeadm/app/discovery/token/token.go new file mode 100644 index 00000000000..e17ca6ea3d7 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/discovery/token/token.go @@ -0,0 +1,242 @@ +/* +Copyright 2016 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package token + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + bootstrap "k8s.io/cluster-bootstrap/token/jws" + "k8s.io/klog/v2" + + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + kubeadmapi "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + kubeconfigutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin" +) + +// BootstrapUser defines bootstrap user name +const BootstrapUser = "token-bootstrap-client" + +// TokenUser defines token user +const TokenUser = "tls-bootstrap-token-user" + +// RetrieveBootstrapConfig get clientcmdapi config by bootstrap token +func RetrieveBootstrapConfig(data joindata.YurtJoinData) (*clientcmdapi.Config, error) { + cfg, err := retrieveValidatedConfigInfo(nil, data) + if err != nil { + return nil, err + } + + clusterinfo := kubeconfigutil.GetClusterFromKubeConfig(cfg) + return kubeconfigutil.CreateWithToken( + fmt.Sprintf("https://%s", data.ServerAddr()), + "kubernetes", + TokenUser, + clusterinfo.CertificateAuthorityData, + data.JoinToken(), + ), nil +} + +// retrieveValidatedConfigInfo is a private implementation of RetrieveValidatedConfigInfo. +// It accepts an optional clientset that can be used for testing purposes. 
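Illustrative sketch (editorial aside, not part of the patch): the discovery code added in this file consumes two user-supplied inputs, a bootstrap token in kubeadm's "<id>.<secret>" form and an optional list of CA public-key pins. A condensed sketch of how retrieveValidatedConfigInfo below parses those inputs with the vendored helpers before fetching the cluster-info ConfigMap; the wrapper function name is made up for illustration:

    // Sketch only: parseDiscoveryInputs is a hypothetical helper; "data" is the
    // joindata.YurtJoinData value passed through the join phases.
    func parseDiscoveryInputs(data joindata.YurtJoinData) error {
        // The token must look like "abcdef.0123456789abcdef" (id.secret).
        token, err := kubeadmapi.NewBootstrapTokenString(data.JoinToken())
        if err != nil {
            return err
        }
        // Each CA cert hash has the form "sha256:<hex digest of the CA public key>".
        pins := pubkeypin.NewSet()
        if err := pins.Allow(data.CaCertHashes().List()...); err != nil {
            return err
        }
        _ = token // used later to verify the JWS signature on the cluster-info ConfigMap
        return nil
    }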
+func retrieveValidatedConfigInfo(client clientset.Interface, data joindata.YurtJoinData) (*clientcmdapi.Config, error) { + token, err := kubeadmapi.NewBootstrapTokenString(data.JoinToken()) + if err != nil { + return nil, err + } + + // Load the CACertHashes into a pubkeypin.Set + pubKeyPins := pubkeypin.NewSet() + if err = pubKeyPins.Allow(data.CaCertHashes().List()...); err != nil { + return nil, err + } + + endpoint := data.ServerAddr() + insecureBootstrapConfig := buildInsecureBootstrapKubeConfig(endpoint, "kubernetes") + clusterName := insecureBootstrapConfig.Contexts[insecureBootstrapConfig.CurrentContext].Cluster + + klog.V(1).Infof("[discovery] Created cluster-info discovery client, requesting info from %q", endpoint) + insecureClusterInfo, err := getClusterInfo(client, insecureBootstrapConfig, token, constants.DiscoveryRetryInterval, constants.PatchNodeTimeout) + if err != nil { + return nil, err + } + + // Validate the token in the cluster info + insecureKubeconfigBytes, err := validateClusterInfoToken(insecureClusterInfo, token) + if err != nil { + return nil, err + } + + // Load the insecure config + insecureConfig, err := clientcmd.Load(insecureKubeconfigBytes) + if err != nil { + return nil, errors.Wrapf(err, "couldn't parse the kubeconfig file in the %s ConfigMap", bootstrapapi.ConfigMapClusterInfo) + } + + // The ConfigMap should contain a single cluster + if len(insecureConfig.Clusters) != 1 { + return nil, errors.Errorf("expected the kubeconfig file in the %s ConfigMap to have a single cluster, but it had %d", bootstrapapi.ConfigMapClusterInfo, len(insecureConfig.Clusters)) + } + + // If no TLS root CA pinning was specified, we're done + if pubKeyPins.Empty() { + klog.V(1).Infof("[discovery] Cluster info signature and contents are valid and no TLS pinning was specified, will use API Server %q", endpoint) + return insecureConfig, nil + } + + // Load and validate the cluster CA from the insecure kubeconfig + clusterCABytes, err := validateClusterCA(insecureConfig, pubKeyPins) + if err != nil { + return nil, err + } + + // Now that we know the cluster CA, connect back a second time validating with that CA + secureBootstrapConfig := buildSecureBootstrapKubeConfig(endpoint, clusterCABytes, clusterName) + + klog.V(1).Infof("[discovery] Requesting info from %q again to validate TLS against the pinned public key", endpoint) + secureClusterInfo, err := getClusterInfo(client, secureBootstrapConfig, token, constants.DiscoveryRetryInterval, constants.PatchNodeTimeout) + if err != nil { + return nil, err + } + + // Pull the kubeconfig from the securely-obtained ConfigMap and validate that it's the same as what we found the first time + secureKubeconfigBytes := []byte(secureClusterInfo.Data[bootstrapapi.KubeConfigKey]) + if !bytes.Equal(secureKubeconfigBytes, insecureKubeconfigBytes) { + return nil, errors.Errorf("the second kubeconfig from the %s ConfigMap (using validated TLS) was different from the first", bootstrapapi.ConfigMapClusterInfo) + } + + secureKubeconfig, err := clientcmd.Load(secureKubeconfigBytes) + if err != nil { + return nil, errors.Wrapf(err, "couldn't parse the kubeconfig file in the %s ConfigMap", bootstrapapi.ConfigMapClusterInfo) + } + + klog.V(1).Infof("[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server %q", endpoint) + + return secureKubeconfig, nil +} + +// buildInsecureBootstrapKubeConfig makes a kubeconfig object that connects insecurely to the API Server for bootstrapping 
purposes +func buildInsecureBootstrapKubeConfig(endpoint, clustername string) *clientcmdapi.Config { + controlPlaneEndpoint := fmt.Sprintf("https://%s", endpoint) + bootstrapConfig := kubeconfigutil.CreateBasic(controlPlaneEndpoint, clustername, BootstrapUser, []byte{}) + bootstrapConfig.Clusters[clustername].InsecureSkipTLSVerify = true + return bootstrapConfig +} + +// buildSecureBootstrapKubeConfig makes a kubeconfig object that connects securely to the API Server for bootstrapping purposes (validating with the specified CA) +func buildSecureBootstrapKubeConfig(endpoint string, caCert []byte, clustername string) *clientcmdapi.Config { + controlPlaneEndpoint := fmt.Sprintf("https://%s", endpoint) + bootstrapConfig := kubeconfigutil.CreateBasic(controlPlaneEndpoint, clustername, BootstrapUser, caCert) + return bootstrapConfig +} + +// validateClusterInfoToken validates that the JWS token present in the cluster info ConfigMap is valid +func validateClusterInfoToken(insecureClusterInfo *v1.ConfigMap, token *kubeadmapi.BootstrapTokenString) ([]byte, error) { + insecureKubeconfigString, ok := insecureClusterInfo.Data[bootstrapapi.KubeConfigKey] + if !ok || len(insecureKubeconfigString) == 0 { + return nil, errors.Errorf("there is no %s key in the %s ConfigMap. This API Server isn't set up for token bootstrapping, can't connect", + bootstrapapi.KubeConfigKey, bootstrapapi.ConfigMapClusterInfo) + } + + detachedJWSToken, ok := insecureClusterInfo.Data[bootstrapapi.JWSSignatureKeyPrefix+token.ID] + if !ok || len(detachedJWSToken) == 0 { + return nil, errors.Errorf("token id %q is invalid for this cluster or it has expired. Use \"kubeadm token create\" on the control-plane node to create a new valid token", token.ID) + } + + if !bootstrap.DetachedTokenIsValid(detachedJWSToken, insecureKubeconfigString, token.ID, token.Secret) { + return nil, errors.New("failed to verify JWS signature of received cluster info object, can't trust this API Server") + } + + return []byte(insecureKubeconfigString), nil +} + +// validateClusterCA validates the cluster CA found in the insecure kubeconfig +func validateClusterCA(insecureConfig *clientcmdapi.Config, pubKeyPins *pubkeypin.Set) ([]byte, error) { + var clusterCABytes []byte + for _, cluster := range insecureConfig.Clusters { + clusterCABytes = cluster.CertificateAuthorityData + } + + clusterCAs, err := certutil.ParseCertsPEM(clusterCABytes) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse cluster CA from the %s ConfigMap", bootstrapapi.ConfigMapClusterInfo) + } + + // Validate the cluster CA public key against the pinned set + err = pubKeyPins.CheckAny(clusterCAs) + if err != nil { + return nil, errors.Wrapf(err, "cluster CA found in %s ConfigMap is invalid", bootstrapapi.ConfigMapClusterInfo) + } + + return clusterCABytes, nil +} + +// getClusterInfo creates a client from the given kubeconfig if the given client is nil, +// and requests the cluster info ConfigMap using PollImmediate. +// If a client is provided it will be used instead. 
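Illustrative sketch (editorial aside, not part of the patch): getClusterInfo below combines wait.JitterUntil with a cancellable context so polling stops either when the cluster-info ConfigMap carries the expected JWS signature or when the timeout expires. The same pattern in isolation, with the ConfigMap lookup replaced by a hypothetical tryOnce callback:

    // Sketch only: tryOnce stands in for the real ConfigMap lookup.
    // Imports assumed: "context", "time", "k8s.io/apimachinery/pkg/util/wait".
    func pollUntilReady(tryOnce func() error, interval, timeout time.Duration) error {
        ctx, cancel := context.WithTimeout(context.TODO(), timeout)
        defer cancel()

        var lastErr error
        wait.JitterUntil(func() {
            if err := tryOnce(); err != nil {
                lastErr = err
                return // keep polling until the context expires
            }
            lastErr = nil
            cancel() // success: stop the JitterUntil loop early
        }, interval, 0.3, true, ctx.Done())
        return lastErr
    }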
+func getClusterInfo(client clientset.Interface, kubeconfig *clientcmdapi.Config, token *kubeadmapi.BootstrapTokenString, interval, duration time.Duration) (*v1.ConfigMap, error) { + var cm *v1.ConfigMap + var err error + + // Create client from kubeconfig + if client == nil { + client, err = kubeconfigutil.ToClientSet(kubeconfig) + if err != nil { + return nil, err + } + } + + ctx, cancel := context.WithTimeout(context.TODO(), duration) + defer cancel() + + wait.JitterUntil(func() { + cm, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + if err != nil { + klog.V(1).Infof("[discovery] Failed to request cluster-info, will try again: %v", err) + return + } + // Even if the ConfigMap is available the JWS signature is patched-in a bit later. + // Make sure we retry until then. + if _, ok := cm.Data[bootstrapapi.JWSSignatureKeyPrefix+token.ID]; !ok { + klog.V(1).Infof("[discovery] The cluster-info ConfigMap does not yet contain a JWS signature for token ID %q, will try again", token.ID) + err = errors.Errorf("could not find a JWS signature in the cluster-info ConfigMap for token ID %q", token.ID) + return + } + // Cancel the context on success + cancel() + }, interval, 0.3, true, ctx.Done()) + + if err != nil { + return nil, err + } + + return cm, nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go new file mode 100644 index 00000000000..bfb600465b5 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go @@ -0,0 +1,117 @@ +/* +Copyright 2017 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package clusterinfo + +import ( + "fmt" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + "k8s.io/klog/v2" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient" +) + +const ( + // BootstrapSignerClusterRoleName sets the name for the ClusterRole that allows access to ConfigMaps in the kube-public ns + BootstrapSignerClusterRoleName = "kubeadm:bootstrap-signer-clusterinfo" +) + +// CreateBootstrapConfigMapIfNotExists creates the kube-public ConfigMap if it doesn't exist already +func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string) error { + + fmt.Printf("[bootstrap-token] Creating the %q ConfigMap in the %q namespace\n", bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic) + + klog.V(1).Infoln("[bootstrap-token] loading admin kubeconfig") + adminConfig, err := clientcmd.LoadFromFile(file) + if err != nil { + return errors.Wrap(err, "failed to load admin kubeconfig") + } + + adminCluster := adminConfig.Contexts[adminConfig.CurrentContext].Cluster + // Copy the cluster from admin.conf to the bootstrap kubeconfig, contains the CA cert and the server URL + klog.V(1).Infoln("[bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig") + bootstrapConfig := &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ + "": adminConfig.Clusters[adminCluster], + }, + } + bootstrapBytes, err := clientcmd.Write(*bootstrapConfig) + if err != nil { + return err + } + + // Create or update the ConfigMap in the kube-public namespace + klog.V(1).Infoln("[bootstrap-token] creating/updating ConfigMap in kube-public namespace") + return apiclient.CreateOrUpdateConfigMapWithTry(client, &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: bootstrapapi.ConfigMapClusterInfo, + Namespace: metav1.NamespacePublic, + }, + Data: map[string]string{ + bootstrapapi.KubeConfigKey: string(bootstrapBytes), + }, + }) +} + +// CreateClusterInfoRBACRules creates the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace to unauthenticated users +func CreateClusterInfoRBACRules(client clientset.Interface) error { + klog.V(1).Infoln("creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace") + err := apiclient.CreateOrUpdateRoleWithTry(client, &rbac.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: BootstrapSignerClusterRoleName, + Namespace: metav1.NamespacePublic, + }, + Rules: []rbac.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + ResourceNames: []string{bootstrapapi.ConfigMapClusterInfo}, + }, + }, + }) + if err != nil { + return err + } + + return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: BootstrapSignerClusterRoleName, + Namespace: metav1.NamespacePublic, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "Role", + Name: BootstrapSignerClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.UserKind, + Name: user.Anonymous, + }, + }, + }) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/node/token.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/node/token.go new file mode 
100644 index 00000000000..20414f52ce6 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/node/token.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package node + +import ( + "context" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + bootstraputil "k8s.io/cluster-bootstrap/token/util" + + kubeadmapi "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient" +) + +// TODO(mattmoyer): Move CreateNewTokens, UpdateOrCreateTokens out of this package to client-go for a generic abstraction and client for a Bootstrap Token + +// CreateNewTokens tries to create a token and fails if one with the same ID already exists +func CreateNewTokens(client clientset.Interface, tokens []kubeadmapi.BootstrapToken) error { + return UpdateOrCreateTokens(client, true, tokens) +} + +// UpdateOrCreateTokens attempts to update a token with the given ID, or create if it does not already exist. +func UpdateOrCreateTokens(client clientset.Interface, failIfExists bool, tokens []kubeadmapi.BootstrapToken) error { + + for _, token := range tokens { + + secretName := bootstraputil.BootstrapTokenSecretName(token.Token.ID) + secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretName, metav1.GetOptions{}) + if secret != nil && err == nil && failIfExists { + return errors.Errorf("a token with id %q already exists", token.Token.ID) + } + + updatedOrNewSecret := token.ToSecret() + // Try to create or update the token with an exponential backoff + err = apiclient.TryRunCommand(func() error { + if err := apiclient.CreateOrUpdateSecret(client, updatedOrNewSecret); err != nil { + return errors.Wrapf(err, "failed to create or update bootstrap token with name %s", secretName) + } + return nil + }, 5) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags.go new file mode 100644 index 00000000000..88f0b94082f --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags.go @@ -0,0 +1,116 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "k8s.io/klog/v2" + + "github.com/openyurtio/openyurt/pkg/projectinfo" + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + kubeadmutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util" +) + +// WriteKubeletDynamicEnvFile writes an environment file with dynamic flags to the kubelet. +// Used at "kubeadm init" and "kubeadm join" time. +func WriteKubeletDynamicEnvFile(data joindata.YurtJoinData, kubeletDir string) error { + stringMap := buildKubeletArgMap(data) + argList := kubeadmutil.BuildArgumentListFromMap(stringMap, map[string]string{}) + envFileContent := fmt.Sprintf("%s=%q\n", constants.KubeletEnvFileVariableName, strings.Join(argList, " ")) + + return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir) +} + +//buildKubeletArgMapCommon takes a kubeletFlagsOpts object and builds based on that a string-string map with flags +//that are common to both Linux and Windows +func buildKubeletArgMapCommon(data joindata.YurtJoinData) map[string]string { + kubeletFlags := map[string]string{} + + nodeReg := data.NodeRegistration() + if nodeReg.CRISocket == constants.DefaultDockerCRISocket { + // These flags should only be set when running docker + kubeletFlags["network-plugin"] = "cni" + if data.PauseImage() != "" { + kubeletFlags["pod-infra-container-image"] = data.PauseImage() + } + } else { + kubeletFlags["container-runtime"] = "remote" + kubeletFlags["container-runtime-endpoint"] = nodeReg.CRISocket + } + + hostname, err := os.Hostname() + if err != nil { + klog.Warning(err) + } + if nodeReg.Name != hostname { + klog.V(1).Infof("setting kubelet hostname-override to %q", nodeReg.Name) + kubeletFlags["hostname-override"] = nodeReg.Name + } + + kubeletFlags["node-labels"] = constructNodeLabels(data.NodeLabels(), nodeReg.WorkingMode, projectinfo.GetEdgeWorkerLabelKey()) + + kubeletFlags["rotate-certificates"] = "false" + + return kubeletFlags +} + +// constructNodeLabels make up node labels string +func constructNodeLabels(nodeLabels map[string]string, workingMode, edgeWorkerLabel string) string { + if nodeLabels == nil { + nodeLabels = make(map[string]string) + } + if _, ok := nodeLabels[edgeWorkerLabel]; !ok { + if workingMode == "cloud" { + nodeLabels[edgeWorkerLabel] = "false" + } else { + nodeLabels[edgeWorkerLabel] = "true" + } + } + var labelsStr string + for k, v := range nodeLabels { + if len(labelsStr) == 0 { + labelsStr = fmt.Sprintf("%s=%s", k, v) + } else { + labelsStr = fmt.Sprintf("%s,%s=%s", labelsStr, k, v) + } + } + + return labelsStr +} + +// writeKubeletFlagBytesToDisk writes a byte slice down to disk at the specific location of the kubelet flag overrides file +func writeKubeletFlagBytesToDisk(b []byte, kubeletDir string) error { + kubeletEnvFilePath := filepath.Join(kubeletDir, constants.KubeletEnvFileName) + fmt.Printf("[kubelet-start] Writing kubelet environment file with flags to file %q\n", kubeletEnvFilePath) + + // creates target folder if not already exists + if err := os.MkdirAll(kubeletDir, 0700); err != nil { + return errors.Wrapf(err, "failed to create directory %q", kubeletDir) + } + if err := ioutil.WriteFile(kubeletEnvFilePath, b, 0644); err != nil { + return errors.Wrapf(err, "failed to write kubelet configuration to the file %q", kubeletEnvFilePath) + } + return nil +} diff --git 
a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_test.go new file mode 100644 index 00000000000..fb64fd67ac0 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "reflect" + "strings" + "testing" +) + +func TestConstructNodeLabels(t *testing.T) { + edgeWorkerLabel := "openyurt.io/is-edge-worker" + testcases := map[string]struct { + nodeLabels map[string]string + mode string + result map[string]string + }{ + "no input node labels with cloud mode": { + mode: "cloud", + result: map[string]string{ + "openyurt.io/is-edge-worker": "false", + }, + }, + "one input node labels with cloud mode": { + nodeLabels: map[string]string{"foo": "bar"}, + mode: "cloud", + result: map[string]string{ + "openyurt.io/is-edge-worker": "false", + "foo": "bar", + }, + }, + "more than one input node labels with cloud mode": { + nodeLabels: map[string]string{ + "foo": "bar", + "foo2": "bar2", + }, + mode: "cloud", + result: map[string]string{ + "openyurt.io/is-edge-worker": "false", + "foo": "bar", + "foo2": "bar2", + }, + }, + "no input node labels with edge mode": { + mode: "edge", + result: map[string]string{ + "openyurt.io/is-edge-worker": "true", + }, + }, + "one input node labels with edge mode": { + nodeLabels: map[string]string{"foo": "bar"}, + mode: "edge", + result: map[string]string{ + "openyurt.io/is-edge-worker": "true", + "foo": "bar", + }, + }, + } + + for k, tc := range testcases { + t.Run(k, func(t *testing.T) { + constructedLabelsStr := constructNodeLabels(tc.nodeLabels, tc.mode, edgeWorkerLabel) + constructedLabels := make(map[string]string) + parts := strings.Split(constructedLabelsStr, ",") + for i := range parts { + kv := strings.Split(parts[i], "=") + if len(kv) == 2 { + constructedLabels[kv[0]] = kv[1] + } + } + + if !reflect.DeepEqual(constructedLabels, tc.result) { + t.Errorf("expected node labels: %v, but got %v", tc.result, constructedLabels) + } + }) + } + +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go new file mode 100644 index 00000000000..17b13366605 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +/* +Copyright 2020 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "k8s.io/klog/v2" + utilsexec "k8s.io/utils/exec" + + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + kubeadmutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem" +) + +// buildKubeletArgMap takes a kubeletFlagsOpts object and builds based on that a string-string map with flags +// that should be given to the local Linux kubelet daemon. +func buildKubeletArgMap(data joindata.YurtJoinData) map[string]string { + kubeletFlags := buildKubeletArgMapCommon(data) + + // TODO: Conditionally set `--cgroup-driver` to either `systemd` or `cgroupfs` for CRI other than Docker + nodeReg := data.NodeRegistration() + if nodeReg.CRISocket == constants.DefaultDockerCRISocket { + driver, err := kubeadmutil.GetCgroupDriverDocker(utilsexec.New()) + if err != nil { + klog.Warningf("cannot automatically assign a '--cgroup-driver' value when starting the Kubelet: %v\n", err) + } else { + kubeletFlags["cgroup-driver"] = driver + } + } + + initSystem, err := initsystem.GetInitSystem() + if err != nil { + klog.Warningf("cannot get init system: %v\n", err) + return kubeletFlags + } + + if initSystem.ServiceIsActive("systemd-resolved") { + kubeletFlags["resolv-conf"] = "/run/systemd/resolve/resolv.conf" + } + + return kubeletFlags +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_windows.go new file mode 100644 index 00000000000..5300224cecc --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_windows.go @@ -0,0 +1,25 @@ +// +build windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +// buildKubeletArgMap takes a kubeletFlagsOpts object and builds based on that a string-string map with flags +// that should be given to the local Windows kubelet daemon. +func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string { + return buildKubeletArgMapCommon(opts) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/kubelet.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/kubelet.go new file mode 100644 index 00000000000..71bd27e1d07 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/kubelet.go @@ -0,0 +1,81 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem" +) + +// TryStartKubelet attempts to bring up kubelet service +func TryStartKubelet() { + // If we notice that the kubelet service is inactive, try to start it + initSystem, err := initsystem.GetInitSystem() + if err != nil { + fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet is running properly.") + return + } + + if !initSystem.ServiceExists("kubelet") { + fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet is running properly.") + } + + // This runs "systemctl daemon-reload && systemctl restart kubelet" + if err := initSystem.ServiceRestart("kubelet"); err != nil { + fmt.Printf("[kubelet-start] WARNING: unable to start the kubelet service: [%v]\n", err) + fmt.Printf("[kubelet-start] Please ensure kubelet is reloaded and running manually.\n") + } +} + +// TryStopKubelet attempts to bring down the kubelet service momentarily +func TryStopKubelet() { + // If we notice that the kubelet service is inactive, try to start it + initSystem, err := initsystem.GetInitSystem() + if err != nil { + fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.") + return + } + + if !initSystem.ServiceExists("kubelet") { + fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.") + } + + // This runs "systemctl daemon-reload && systemctl stop kubelet" + if err := initSystem.ServiceStop("kubelet"); err != nil { + fmt.Printf("[kubelet-start] WARNING: unable to stop the kubelet service momentarily: [%v]\n", err) + } +} + +// TryRestartKubelet attempts to restart the kubelet service +func TryRestartKubelet() { + // If we notice that the kubelet service is inactive, try to start it + initSystem, err := initsystem.GetInitSystem() + if err != nil { + fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.") + return + } + + if !initSystem.ServiceExists("kubelet") { + fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.") + } + + // This runs "systemctl daemon-reload && systemctl stop kubelet" + if err := initSystem.ServiceRestart("kubelet"); err != nil { + fmt.Printf("[kubelet-start] WARNING: unable to restart the kubelet service momentarily: [%v]\n", err) + } +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go new file mode 100644 index 00000000000..01862fafd32 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go @@ -0,0 +1,736 @@ +/* +Copyright 2016 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package preflight + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + versionutil "k8s.io/apimachinery/pkg/util/version" + "k8s.io/klog/v2" + system "k8s.io/system-validators/validators" + utilsexec "k8s.io/utils/exec" + + "github.com/openyurtio/openyurt/pkg/yurtctl/cmd/join/joindata" + kubeadmconstants "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem" + utilruntime "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime" +) + +const ( + bridgenf = "/proc/sys/net/bridge/bridge-nf-call-iptables" + ipv4Forward = "/proc/sys/net/ipv4/ip_forward" +) + +// Error defines struct for communicating error messages generated by preflight checks +type Error struct { + Msg string +} + +// Error implements the standard error interface +func (e *Error) Error() string { + return fmt.Sprintf("[preflight] Some fatal errors occurred:\n%s%s", e.Msg, "[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`") +} + +// Preflight identifies this error as a preflight error +func (e *Error) Preflight() bool { + return true +} + +// Checker validates the state of the system to ensure kubeadm will be +// successful as often as possible. +type Checker interface { + Check() (warnings, errorList []error) + Name() string +} + +// ContainerRuntimeCheck verifies the container runtime. +type ContainerRuntimeCheck struct { + runtime utilruntime.ContainerRuntime +} + +// Name returns label for RuntimeCheck. +func (ContainerRuntimeCheck) Name() string { + return "CRI" +} + +// Check validates the container runtime +func (crc ContainerRuntimeCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("validating the container runtime") + if err := crc.runtime.IsRunning(); err != nil { + errorList = append(errorList, err) + } + return warnings, errorList +} + +// ServiceCheck verifies that the given service is enabled and active. If we do not +// detect a supported init system however, all checks are skipped and a warning is +// returned. +type ServiceCheck struct { + Service string + CheckIfActive bool + Label string +} + +// Name returns label for ServiceCheck. If not provided, will return based on the service parameter +func (sc ServiceCheck) Name() string { + if sc.Label != "" { + return sc.Label + } + return fmt.Sprintf("Service-%s", strings.Title(sc.Service)) +} + +// Check validates if the service is enabled and active. 
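Illustrative sketch (editorial aside, not part of the patch): every preflight check in this file satisfies the small Checker interface above, so a yurtctl-specific check only needs Name and Check methods and can then be run alongside the vendored ones. A hypothetical example:

    // Sketch only: a made-up check that warns when an environment variable is unset.
    type EnvVarCheck struct {
        Key string
    }

    func (c EnvVarCheck) Name() string { return "EnvVar-" + c.Key }

    func (c EnvVarCheck) Check() (warnings, errorList []error) {
        if os.Getenv(c.Key) == "" {
            warnings = append(warnings, errors.Errorf("environment variable %s is not set", c.Key))
        }
        return warnings, errorList
    }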
+func (sc ServiceCheck) Check() (warnings, errorList []error) { + klog.V(1).Infof("validating if the %q service is enabled and active", sc.Service) + initSystem, err := initsystem.GetInitSystem() + if err != nil { + return []error{err}, nil + } + + if !initSystem.ServiceExists(sc.Service) { + return []error{errors.Errorf("%s service does not exist", sc.Service)}, nil + } + + if !initSystem.ServiceIsEnabled(sc.Service) { + warnings = append(warnings, + errors.Errorf("%s service is not enabled, please run '%s'", + sc.Service, initSystem.EnableCommand(sc.Service))) + } + + if sc.CheckIfActive && !initSystem.ServiceIsActive(sc.Service) { + errorList = append(errorList, + errors.Errorf("%s service is not active, please run 'systemctl start %s.service'", + sc.Service, sc.Service)) + } + + return warnings, errorList +} + +// FirewalldCheck checks if firewalld is enabled or active. If it is, warn the user that there may be problems +// if no actions are taken. +type FirewalldCheck struct { + ports []int +} + +// Name returns label for FirewalldCheck. +func (FirewalldCheck) Name() string { + return "Firewalld" +} + +// Check validates if the firewall is enabled and active. +func (fc FirewalldCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("validating if the firewall is enabled and active") + initSystem, err := initsystem.GetInitSystem() + if err != nil { + return []error{err}, nil + } + + if !initSystem.ServiceExists("firewalld") { + return nil, nil + } + + if initSystem.ServiceIsActive("firewalld") { + err := errors.Errorf("firewalld is active, please ensure ports %v are open or your cluster may not function correctly", + fc.ports) + return []error{err}, nil + } + + return nil, nil +} + +// PortOpenCheck ensures the given port is available for use. +type PortOpenCheck struct { + port int + label string +} + +// Name returns name for PortOpenCheck. If not known, will return "PortXXXX" based on port number +func (poc PortOpenCheck) Name() string { + if poc.label != "" { + return poc.label + } + return fmt.Sprintf("Port-%d", poc.port) +} + +// Check validates if the particular port is available. +func (poc PortOpenCheck) Check() (warnings, errorList []error) { + klog.V(1).Infof("validating availability of port %d", poc.port) + + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", poc.port)) + if err != nil { + errorList = []error{errors.Errorf("Port %d is in use", poc.port)} + } + if ln != nil { + if err = ln.Close(); err != nil { + warnings = append(warnings, + errors.Errorf("when closing port %d, encountered %v", poc.port, err)) + } + } + + return warnings, errorList +} + +// IsPrivilegedUserCheck verifies user is privileged (linux - root, windows - Administrator) +type IsPrivilegedUserCheck struct{} + +// Name returns name for IsPrivilegedUserCheck +func (IsPrivilegedUserCheck) Name() string { + return "IsPrivilegedUser" +} + +// DirAvailableCheck checks if the given directory either does not exist, or is empty. +type DirAvailableCheck struct { + Path string + Label string +} + +// Name returns label for individual DirAvailableChecks. If not known, will return based on path. +func (dac DirAvailableCheck) Name() string { + if dac.Label != "" { + return dac.Label + } + return fmt.Sprintf("DirAvailable-%s", strings.Replace(dac.Path, "/", "-", -1)) +} + +// Check validates if a directory does not exist or empty. 
+func (dac DirAvailableCheck) Check() (warnings, errorList []error) { + klog.V(1).Infof("validating the existence and emptiness of directory %s", dac.Path) + + // If it doesn't exist we are good: + if _, err := os.Stat(dac.Path); os.IsNotExist(err) { + return nil, nil + } + + f, err := os.Open(dac.Path) + if err != nil { + return nil, []error{errors.Wrapf(err, "unable to check if %s is empty", dac.Path)} + } + defer f.Close() + + _, err = f.Readdirnames(1) + if err != io.EOF { + return nil, []error{errors.Errorf("%s is not empty", dac.Path)} + } + + return nil, nil +} + +// FileAvailableCheck checks that the given file does not already exist. +type FileAvailableCheck struct { + Path string + Label string +} + +// Name returns label for individual FileAvailableChecks. If not known, will return based on path. +func (fac FileAvailableCheck) Name() string { + if fac.Label != "" { + return fac.Label + } + return fmt.Sprintf("FileAvailable-%s", strings.Replace(fac.Path, "/", "-", -1)) +} + +// Check validates if the given file does not already exist. +func (fac FileAvailableCheck) Check() (warnings, errorList []error) { + klog.V(1).Infof("validating the existence of file %s", fac.Path) + + if _, err := os.Stat(fac.Path); err == nil { + return nil, []error{errors.Errorf("%s already exists", fac.Path)} + } + return nil, nil +} + +// FileExistingCheck checks that the given file does not already exist. +type FileExistingCheck struct { + Path string + Label string +} + +// Name returns label for individual FileExistingChecks. If not known, will return based on path. +func (fac FileExistingCheck) Name() string { + if fac.Label != "" { + return fac.Label + } + return fmt.Sprintf("FileExisting-%s", strings.Replace(fac.Path, "/", "-", -1)) +} + +// Check validates if the given file already exists. +func (fac FileExistingCheck) Check() (warnings, errorList []error) { + klog.V(1).Infof("validating the existence of file %s", fac.Path) + + if _, err := os.Stat(fac.Path); err != nil { + return nil, []error{errors.Errorf("%s doesn't exist", fac.Path)} + } + return nil, nil +} + +// FileContentCheck checks that the given file contains the string Content. +type FileContentCheck struct { + Path string + Content []byte + Label string +} + +// Name returns label for individual FileContentChecks. If not known, will return based on path. +func (fcc FileContentCheck) Name() string { + if fcc.Label != "" { + return fcc.Label + } + return fmt.Sprintf("FileContent-%s", strings.Replace(fcc.Path, "/", "-", -1)) +} + +// Check validates if the given file contains the given content. +func (fcc FileContentCheck) Check() (warnings, errorList []error) { + klog.V(1).Infof("validating the contents of file %s", fcc.Path) + f, err := os.Open(fcc.Path) + if err != nil { + return nil, []error{errors.Errorf("%s does not exist", fcc.Path)} + } + + lr := io.LimitReader(f, int64(len(fcc.Content))) + defer f.Close() + + buf := &bytes.Buffer{} + _, err = io.Copy(buf, lr) + if err != nil { + return nil, []error{errors.Errorf("%s could not be read", fcc.Path)} + } + + if !bytes.Equal(buf.Bytes(), fcc.Content) { + return nil, []error{errors.Errorf("%s contents are not set to %s", fcc.Path, fcc.Content)} + } + return nil, []error{} + +} + +// InPathCheck checks if the given executable is present in $PATH +type InPathCheck struct { + executable string + mandatory bool + exec utilsexec.Interface + label string + suggestion string +} + +// Name returns label for individual InPathCheck. If not known, will return based on path. 
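Illustrative sketch (editorial aside, not part of the patch): FileContentCheck above compares only the first len(Content) bytes of the file against Content, which makes it a natural fit for the single-character sysctl files named by the bridgenf and ipv4Forward constants at the top of this file. A hypothetical instantiation:

    // Sketch only: returns an error unless /proc/sys/net/bridge/bridge-nf-call-iptables reads "1".
    check := FileContentCheck{
        Path:    bridgenf,
        Content: []byte{'1'},
    }
    warnings, errs := check.Check()
    _, _ = warnings, errs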
+func (ipc InPathCheck) Name() string { + if ipc.label != "" { + return ipc.label + } + return fmt.Sprintf("FileExisting-%s", strings.Replace(ipc.executable, "/", "-", -1)) +} + +// Check validates if the given executable is present in the path. +func (ipc InPathCheck) Check() (warnings, errs []error) { + klog.V(1).Infof("validating the presence of executable %s", ipc.executable) + _, err := ipc.exec.LookPath(ipc.executable) + if err != nil { + if ipc.mandatory { + // Return as an error: + return nil, []error{errors.Errorf("%s not found in system path", ipc.executable)} + } + // Return as a warning: + warningMessage := fmt.Sprintf("%s not found in system path", ipc.executable) + if ipc.suggestion != "" { + warningMessage += fmt.Sprintf("\nSuggestion: %s", ipc.suggestion) + } + return []error{errors.New(warningMessage)}, nil + } + return nil, nil +} + +// HostnameCheck checks if hostname match dns sub domain regex. +// If hostname doesn't match this regex, kubelet will not launch static pods like kube-apiserver/kube-controller-manager and so on. +type HostnameCheck struct { + nodeName string +} + +// Name will return Hostname as name for HostnameCheck +func (HostnameCheck) Name() string { + return "Hostname" +} + +// Check validates if hostname match dns sub domain regex. +// Check hostname length and format +func (hc HostnameCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("checking whether the given node name is valid and reachable using net.LookupHost") + for _, msg := range validation.IsQualifiedName(hc.nodeName) { + warnings = append(warnings, errors.Errorf("invalid node name format %q: %s", hc.nodeName, msg)) + } + + addr, err := net.LookupHost(hc.nodeName) + if addr == nil { + warnings = append(warnings, errors.Errorf("hostname \"%s\" could not be reached", hc.nodeName)) + } + if err != nil { + warnings = append(warnings, errors.Wrapf(err, "hostname \"%s\"", hc.nodeName)) + } + return warnings, errorList +} + +// SystemVerificationCheck defines struct used for running the system verification node check in test/e2e_node/system +type SystemVerificationCheck struct { + IsDocker bool +} + +// Name will return SystemVerification as name for SystemVerificationCheck +func (SystemVerificationCheck) Name() string { + return "SystemVerification" +} + +// Check runs all individual checks +func (sysver SystemVerificationCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("running all checks") + // Create a buffered writer and choose a quite large value (1M) and suppose the output from the system verification test won't exceed the limit + // Run the system verification check, but write to out buffered writer instead of stdout + bufw := bufio.NewWriterSize(os.Stdout, 1*1024*1024) + reporter := &system.StreamReporter{WriteStream: bufw} + + var errs []error + var warns []error + // All the common validators we'd like to run: + var validators = []system.Validator{ + &system.KernelValidator{Reporter: reporter}} + + // run the docker validator only with docker runtime + if sysver.IsDocker { + validators = append(validators, &system.DockerValidator{Reporter: reporter}) + } + + if runtime.GOOS == "linux" { + //add linux validators + validators = append(validators, + &system.OSValidator{Reporter: reporter}, + &system.CgroupsValidator{Reporter: reporter}) + } + + // Run all validators + for _, v := range validators { + warn, err := v.Validate(system.DefaultSysSpec) + if err != nil { + errs = append(errs, err...) + } + if warn != nil { + warns = append(warns, warn...) 
+ } + } + + if len(errs) != 0 { + // Only print the output from the system verification check if the check failed + fmt.Println("[preflight] The system verification failed. Printing the output from the verification:") + bufw.Flush() + return warns, errs + } + return warns, nil +} + +// KubernetesVersionCheck validates Kubernetes and kubeadm versions +type KubernetesVersionCheck struct { + KubeadmVersion string + KubernetesVersion string +} + +// Name will return KubernetesVersion as name for KubernetesVersionCheck +func (KubernetesVersionCheck) Name() string { + return "KubernetesVersion" +} + +// Check validates Kubernetes and kubeadm versions +func (kubever KubernetesVersionCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("validating Kubernetes and kubeadm version") + // Skip this check for "super-custom builds", where apimachinery/the overall codebase version is not set. + if strings.HasPrefix(kubever.KubeadmVersion, "v0.0.0") { + return nil, nil + } + + kadmVersion, err := versionutil.ParseSemantic(kubever.KubeadmVersion) + if err != nil { + return nil, []error{errors.Wrapf(err, "couldn't parse kubeadm version %q", kubever.KubeadmVersion)} + } + + k8sVersion, err := versionutil.ParseSemantic(kubever.KubernetesVersion) + if err != nil { + return nil, []error{errors.Wrapf(err, "couldn't parse Kubernetes version %q", kubever.KubernetesVersion)} + } + + // Checks if k8sVersion greater or equal than the first unsupported versions by current version of kubeadm, + // that is major.minor+1 (all patch and pre-releases versions included) + // NB. in semver patches number is a numeric, while prerelease is a string where numeric identifiers always have lower precedence than non-numeric identifiers. + // thus setting the value to x.y.0-0 we are defining the very first patch - prereleases within x.y minor release. + firstUnsupportedVersion := versionutil.MustParseSemantic(fmt.Sprintf("%d.%d.%s", kadmVersion.Major(), kadmVersion.Minor()+1, "0-0")) + if k8sVersion.AtLeast(firstUnsupportedVersion) { + return []error{errors.Errorf("Kubernetes version is greater than kubeadm version. Please consider to upgrade kubeadm. Kubernetes version: %s. Kubeadm version: %d.%d.x", k8sVersion, kadmVersion.Components()[0], kadmVersion.Components()[1])}, nil + } + + return nil, nil +} + +// KubeletVersionCheck validates installed kubelet version +type KubeletVersionCheck struct { + KubernetesVersion string + exec utilsexec.Interface +} + +// Name will return KubeletVersion as name for KubeletVersionCheck +func (KubeletVersionCheck) Name() string { + return "KubeletVersion" +} + +// Check validates kubelet version. It should be not less than minimal supported version +func (kubever KubeletVersionCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("validating kubelet version") + kubeletVersion, err := GetKubeletVersion(kubever.exec) + if err != nil { + return nil, []error{errors.Wrap(err, "couldn't get kubelet version")} + } + if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletVersion) { + return nil, []error{errors.Errorf("Kubelet version %q is lower than kubeadm can support. 
Please upgrade kubelet", kubeletVersion)} + } + + if kubever.KubernetesVersion != "" { + k8sVersion, err := versionutil.ParseSemantic(kubever.KubernetesVersion) + if err != nil { + return nil, []error{errors.Wrapf(err, "couldn't parse Kubernetes version %q", kubever.KubernetesVersion)} + } + if kubeletVersion.Major() > k8sVersion.Major() || kubeletVersion.Minor() > k8sVersion.Minor() { + return nil, []error{errors.Errorf("the kubelet version is higher than the control plane version. This is not a supported version skew and may lead to a malfunctional cluster. Kubelet version: %q Control plane version: %q", kubeletVersion, k8sVersion)} + } + } + return nil, nil +} + +// SwapCheck warns if swap is enabled +type SwapCheck struct{} + +// Name will return Swap as name for SwapCheck +func (SwapCheck) Name() string { + return "Swap" +} + +// Check validates whether swap is enabled or not +func (swc SwapCheck) Check() (warnings, errorList []error) { + klog.V(1).Infoln("validating whether swap is enabled or not") + f, err := os.Open("/proc/swaps") + if err != nil { + // /proc/swaps not available, thus no reasons to warn + return nil, nil + } + defer f.Close() + var buf []string + scanner := bufio.NewScanner(f) + for scanner.Scan() { + buf = append(buf, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return nil, []error{errors.Wrap(err, "error parsing /proc/swaps")} + } + + if len(buf) > 1 { + return nil, []error{errors.New("running with swap on is not supported. Please disable swap")} + } + + return nil, nil +} + +// ImagePullCheck will pull container images used by kubeadm +type ImagePullCheck struct { + runtime utilruntime.ContainerRuntime + imageList []string + imagePullPolicy v1.PullPolicy +} + +// Name returns the label for ImagePullCheck +func (ImagePullCheck) Name() string { + return "ImagePull" +} + +// Check pulls images required by kubeadm. 
This is a mutating check +func (ipc ImagePullCheck) Check() (warnings, errorList []error) { + policy := ipc.imagePullPolicy + klog.V(1).Infof("using image pull policy: %s", policy) + for _, image := range ipc.imageList { + switch policy { + case v1.PullNever: + klog.V(1).Infof("skipping pull of image: %s", image) + continue + case v1.PullIfNotPresent: + ret, err := ipc.runtime.ImageExists(image) + if ret && err == nil { + klog.V(1).Infof("image exists: %s", image) + continue + } + if err != nil { + errorList = append(errorList, errors.Wrapf(err, "failed to check if image %s exists", image)) + } + fallthrough // Proceed with pulling the image if it does not exist + case v1.PullAlways: + klog.V(1).Infof("pulling: %s", image) + if err := ipc.runtime.PullImage(image); err != nil { + errorList = append(errorList, errors.Wrapf(err, "failed to pull image %s", image)) + } + default: + // If the policy is unknown return early with an error + errorList = append(errorList, errors.Errorf("unsupported pull policy %q", policy)) + return warnings, errorList + } + } + return warnings, errorList +} + +// NumCPUCheck checks if current number of CPUs is not less than required +type NumCPUCheck struct { + NumCPU int +} + +// Name returns the label for NumCPUCheck +func (NumCPUCheck) Name() string { + return "NumCPU" +} + +// Check number of CPUs required by kubeadm +func (ncc NumCPUCheck) Check() (warnings, errorList []error) { + numCPU := runtime.NumCPU() + if numCPU < ncc.NumCPU { + errorList = append(errorList, errors.Errorf("the number of available CPUs %d is less than the required %d", numCPU, ncc.NumCPU)) + } + return warnings, errorList +} + +// RunJoinNodeChecks executes all individual, applicable to node checks. +func RunJoinNodeChecks(execer utilsexec.Interface, data joindata.YurtJoinData) error { + // First, check if we're root separately from the other preflight checks and fail fast + if err := RunRootCheckOnly(data.IgnorePreflightErrors()); err != nil { + return err + } + + checks := []Checker{ + DirAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName)}, + FileExistingCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName)}, + FileExistingCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, "pki", kubeadmconstants.CACertName)}, + } + checks = addCommonChecks(execer, data, checks) + + return RunChecks(checks, os.Stderr, data.IgnorePreflightErrors()) +} + +// addCommonChecks is a helper function to duplicate checks that are common between both the +// kubeadm init and join commands +func addCommonChecks(execer utilsexec.Interface, data joindata.YurtJoinData, checks []Checker) []Checker { + containerRuntime, err := utilruntime.NewContainerRuntime(execer, data.NodeRegistration().CRISocket) + isDocker := false + if err != nil { + fmt.Printf("[preflight] WARNING: Couldn't create the interface used for talking to the container runtime: %v\n", err) + } else { + checks = append(checks, ContainerRuntimeCheck{runtime: containerRuntime}) + if containerRuntime.IsDocker() { + isDocker = true + checks = append(checks, ServiceCheck{Service: "docker", CheckIfActive: true}) + } + } + + // add image pull check for openyurt + checks = append(checks, ImagePullCheck{ + runtime: containerRuntime, + imageList: []string{data.PauseImage(), data.YurtHubImage()}, + imagePullPolicy: v1.PullIfNotPresent, + }) + + // non-windows checks + if runtime.GOOS == "linux" { + if !isDocker { + checks = append(checks, 
InPathCheck{executable: "crictl", mandatory: true, exec: execer}) + } + checks = append(checks, + FileContentCheck{Path: bridgenf, Content: []byte{'1'}}, + FileContentCheck{Path: ipv4Forward, Content: []byte{'1'}}, + SwapCheck{}, + InPathCheck{executable: "conntrack", mandatory: true, exec: execer}, + InPathCheck{executable: "ip", mandatory: true, exec: execer}, + InPathCheck{executable: "iptables", mandatory: true, exec: execer}, + InPathCheck{executable: "mount", mandatory: true, exec: execer}, + InPathCheck{executable: "nsenter", mandatory: true, exec: execer}, + InPathCheck{executable: "ebtables", mandatory: false, exec: execer}, + InPathCheck{executable: "ethtool", mandatory: false, exec: execer}, + InPathCheck{executable: "socat", mandatory: false, exec: execer}, + InPathCheck{executable: "tc", mandatory: false, exec: execer}, + InPathCheck{executable: "touch", mandatory: false, exec: execer}) + } + checks = append(checks, + SystemVerificationCheck{IsDocker: isDocker}, + HostnameCheck{nodeName: data.NodeRegistration().Name}, + KubeletVersionCheck{KubernetesVersion: data.KubernetesVersion(), exec: execer}, + ServiceCheck{Service: "kubelet", CheckIfActive: false}, + PortOpenCheck{port: kubeadmconstants.KubeletPort}) + return checks +} + +// RunRootCheckOnly initializes checks slice of structs and call RunChecks +func RunRootCheckOnly(ignorePreflightErrors sets.String) error { + checks := []Checker{ + IsPrivilegedUserCheck{}, + } + + return RunChecks(checks, os.Stderr, ignorePreflightErrors) +} + +// RunChecks runs each check, displays it's warnings/errors, and once all +// are processed will exit if any errors occurred. +func RunChecks(checks []Checker, ww io.Writer, ignorePreflightErrors sets.String) error { + var errsBuffer bytes.Buffer + + for _, c := range checks { + name := c.Name() + warnings, errs := c.Check() + + if setHasItemOrAll(ignorePreflightErrors, name) { + // Decrease severity of errors to warnings for this check + warnings = append(warnings, errs...) + errs = []error{} + } + + for _, w := range warnings { + io.WriteString(ww, fmt.Sprintf("\t[WARNING %s]: %v\n", name, w)) + } + for _, i := range errs { + errsBuffer.WriteString(fmt.Sprintf("\t[ERROR %s]: %v\n", name, i.Error())) + } + } + if errsBuffer.Len() > 0 { + return &Error{Msg: errsBuffer.String()} + } + return nil +} + +// setHasItemOrAll is helper function that return true if item is present in the set (case insensitive) or special key 'all' is present +func setHasItemOrAll(s sets.String, item string) bool { + if s.Has("all") || s.Has(strings.ToLower(item)) { + return true + } + return false +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go new file mode 100644 index 00000000000..2bbbd9ca2d3 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package preflight + +import ( + "os" + + "github.com/pkg/errors" +) + +// Check validates if an user has elevated (root) privileges. +func (ipuc IsPrivilegedUserCheck) Check() (warnings, errorList []error) { + if os.Getuid() != 0 { + return nil, []error{errors.New("user is not running as root")} + } + + return nil, nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go new file mode 100644 index 00000000000..3076480d2f2 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go @@ -0,0 +1,50 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package preflight + +import ( + "os/user" + + "github.com/pkg/errors" +) + +// The "Well-known SID" of Administrator group +// https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems +const administratorSID = "S-1-5-32-544" + +// Check validates if a user has elevated (administrator) privileges. +func (ipuc IsPrivilegedUserCheck) Check() (warnings, errorList []error) { + currUser, err := user.Current() + if err != nil { + return nil, []error{errors.Wrap(err, "cannot get current user")} + } + + groupIds, err := currUser.GroupIds() + if err != nil { + return nil, []error{errors.Wrap(err, "cannot get group IDs for current user")} + } + + for _, sid := range groupIds { + if sid == administratorSID { + return nil, nil + } + } + + return nil, []error{errors.New("user is not running as administrator")} +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils.go new file mode 100644 index 00000000000..c5a2c335fca --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package preflight + +import ( + "regexp" + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/version" + utilsexec "k8s.io/utils/exec" +) + +// GetKubeletVersion is helper function that returns version of kubelet available in $PATH +func GetKubeletVersion(execer utilsexec.Interface) (*version.Version, error) { + kubeletVersionRegex := regexp.MustCompile(`^\s*Kubernetes v((0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?)\s*$`) + + command := execer.Command("kubelet", "--version") + out, err := command.Output() + if err != nil { + return nil, errors.Wrap(err, "cannot execute 'kubelet --version'") + } + + cleanOutput := strings.TrimSpace(string(out)) + subs := kubeletVersionRegex.FindAllStringSubmatch(cleanOutput, -1) + if len(subs) != 1 || len(subs[0]) < 2 { + return nil, errors.Errorf("Unable to parse output from Kubelet: %q", cleanOutput) + } + return version.ParseSemantic(subs[0][1]) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils_test.go new file mode 100644 index 00000000000..ad7944e3085 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/utils_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package preflight + +import ( + "testing" + + "github.com/pkg/errors" + utilsexec "k8s.io/utils/exec" + fakeexec "k8s.io/utils/exec/testing" +) + +func TestGetKubeletVersion(t *testing.T) { + cases := []struct { + output string + expected string + err error + valid bool + }{ + {"Kubernetes v1.7.0", "1.7.0", nil, true}, + {"Kubernetes v1.8.0-alpha.2.1231+afabd012389d53a", "1.8.0-alpha.2.1231+afabd012389d53a", nil, true}, + {"something-invalid", "", nil, false}, + {"command not found", "", errors.New("kubelet not found"), false}, + {"", "", nil, false}, + } + + for _, tc := range cases { + t.Run(tc.output, func(t *testing.T) { + fcmd := fakeexec.FakeCmd{ + OutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return []byte(tc.output), nil, tc.err }, + }, + } + fexec := &fakeexec.FakeExec{ + CommandScript: []fakeexec.FakeCommandAction{ + func(cmd string, args ...string) utilsexec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + ver, err := GetKubeletVersion(fexec) + switch { + case err != nil && tc.valid: + t.Errorf("GetKubeletVersion: unexpected error for %q. Error: %v", tc.output, err) + case err == nil && !tc.valid: + t.Errorf("GetKubeletVersion: error expected for key %q, but result is %q", tc.output, ver) + case ver != nil && ver.String() != tc.expected: + t.Errorf("GetKubeletVersion: unexpected version result for key %q. 
Expected: %q Actual: %q", tc.output, tc.expected, ver) + } + }) + } +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency.go new file mode 100644 index 00000000000..2237f05d976 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency.go @@ -0,0 +1,356 @@ +/* +Copyright 2017 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiclient + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/pkg/errors" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + clientsetretry "k8s.io/client-go/util/retry" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" +) + +// ConfigMapMutator is a function that mutates the given ConfigMap and optionally returns an error +type ConfigMapMutator func(*v1.ConfigMap) error + +// TODO: We should invent a dynamic mechanism for this using the dynamic client instead of hard-coding these functions per-type + +// CreateOrUpdateConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create ConfigMap") + } + + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil { + return errors.Wrap(err, "unable to update ConfigMap") + } + } + return nil +} + +// CreateOrMutateConfigMap tries to create the ConfigMap provided as cm. If the resource exists already, the latest version will be fetched from +// the cluster and mutator callback will be called on it, then an Update of the mutated ConfigMap will be performed. 
This function is resilient +// to conflicts, and a retry will be issued if the ConfigMap was modified on the server between the refresh and the update (while the mutation was +// taking place) +func CreateOrMutateConfigMap(client clientset.Interface, cm *v1.ConfigMap, mutator ConfigMapMutator) error { + var lastError error + err := wait.PollImmediate(constants.APICallRetryInterval, constants.APICallWithWriteTimeout, func() (bool, error) { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { + lastError = err + if apierrors.IsAlreadyExists(err) { + lastError = MutateConfigMap(client, metav1.ObjectMeta{Namespace: cm.ObjectMeta.Namespace, Name: cm.ObjectMeta.Name}, mutator) + return lastError == nil, nil + } + return false, nil + } + return true, nil + }) + if err == nil { + return nil + } + return lastError +} + +// MutateConfigMap takes a ConfigMap Object Meta (namespace and name), retrieves the resource from the server and tries to mutate it +// by calling to the mutator callback, then an Update of the mutated ConfigMap will be performed. This function is resilient +// to conflicts, and a retry will be issued if the ConfigMap was modified on the server between the refresh and the update (while the mutation was +// taking place). +func MutateConfigMap(client clientset.Interface, meta metav1.ObjectMeta, mutator ConfigMapMutator) error { + var lastError error + err := wait.PollImmediate(constants.APICallRetryInterval, constants.APICallWithWriteTimeout, func() (bool, error) { + configMap, err := client.CoreV1().ConfigMaps(meta.Namespace).Get(context.TODO(), meta.Name, metav1.GetOptions{}) + if err != nil { + lastError = err + return false, nil + } + if err = mutator(configMap); err != nil { + lastError = errors.Wrap(err, "unable to mutate ConfigMap") + return false, nil + } + _, lastError = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + return lastError == nil, nil + }) + if err == nil { + return nil + } + return lastError +} + +// CreateOrRetainConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will retain the resource instead. +func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, configMapName string) error { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return nil + } + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create ConfigMap") + } + } + } + return nil +} + +// CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
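+//
+// A minimal usage sketch of the create-or-update pattern; the Secret below is
+// illustrative and client is an assumed clientset.Interface:
+//
+//	secret := &v1.Secret{
+//		ObjectMeta: metav1.ObjectMeta{Name: "bootstrap-token-abcdef", Namespace: metav1.NamespaceSystem},
+//		Data:       map[string][]byte{"token-id": []byte("abcdef")},
+//	}
+//	if err := CreateOrUpdateSecret(client, secret); err != nil {
+//		return err
+//	}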
+func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { + if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create secret") + } + + if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { + return errors.Wrap(err, "unable to update secret") + } + } + return nil +} + +// CreateOrUpdateServiceAccount creates a ServiceAccount if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error { + if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}); err != nil { + // Note: We don't run .Update here afterwards as that's probably not required + // Only thing that could be updated is annotations/labels in .metadata, but we don't use that currently + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create serviceaccount") + } + } + return nil +} + +// CreateOrUpdateDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create deployment") + } + + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{}); err != nil { + return errors.Wrap(err, "unable to update deployment") + } + } + return nil +} + +// CreateOrRetainDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will retain the resource instead. +func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deployment, deployName string) error { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Get(context.TODO(), deployName, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return nil + } + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create deployment") + } + } + } + return nil +} + +// CreateOrUpdateDaemonSet creates a DaemonSet if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create daemonset") + } + + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds, metav1.UpdateOptions{}); err != nil { + return errors.Wrap(err, "unable to update daemonset") + } + } + return nil +} + +// DeleteDaemonSetForeground deletes the specified DaemonSet in foreground mode; i.e. 
it blocks until/makes sure all the managed Pods are deleted +func DeleteDaemonSetForeground(client clientset.Interface, namespace, name string) error { + foregroundDelete := metav1.DeletePropagationForeground + return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete}) +} + +// DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted +func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error { + foregroundDelete := metav1.DeletePropagationForeground + return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete}) +} + +// CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { + var lastError error + err := wait.PollImmediate(constants.APICallRetryInterval, constants.APICallWithWriteTimeout, func() (bool, error) { + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + lastError = errors.Wrap(err, "unable to create RBAC role") + return false, nil + } + + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{}); err != nil { + lastError = errors.Wrap(err, "unable to update RBAC role") + return false, nil + } + } + return true, nil + }) + if err == nil { + return nil + } + return lastError +} + +// CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error { + var lastError error + err := wait.PollImmediate(constants.APICallRetryInterval, constants.APICallWithWriteTimeout, func() (bool, error) { + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + lastError = errors.Wrap(err, "unable to create RBAC rolebinding") + return false, nil + } + + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}); err != nil { + lastError = errors.Wrap(err, "unable to update RBAC rolebinding") + return false, nil + } + } + return true, nil + }) + if err == nil { + return nil + } + return lastError +} + +// CreateOrUpdateClusterRole creates a ClusterRole if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
+func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error { + if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create RBAC clusterrole") + } + + if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}); err != nil { + return errors.Wrap(err, "unable to update RBAC clusterrole") + } + } + return nil +} + +// CreateOrUpdateClusterRoleBinding creates a ClusterRoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. +func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error { + if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrap(err, "unable to create RBAC clusterrolebinding") + } + + if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding, metav1.UpdateOptions{}); err != nil { + return errors.Wrap(err, "unable to update RBAC clusterrolebinding") + } + } + return nil +} + +// PatchNodeOnce executes patchFn on the node object found by the node name. +// This is a condition function meant to be used with wait.Poll. false, nil +// implies it is safe to try again, an error indicates no more tries should be +// made and true indicates success. +func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1.Node)) func() (bool, error) { + return func() (bool, error) { + // First get the node object + n, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + // TODO this should only be for timeouts + return false, nil + } + + // The node may appear to have no labels at first, + // so we wait for it to get hostname label. + if _, found := n.ObjectMeta.Labels[v1.LabelHostname]; !found { + return false, nil + } + + oldData, err := json.Marshal(n) + if err != nil { + return false, errors.Wrapf(err, "failed to marshal unmodified node %q into JSON", n.Name) + } + + // Execute the mutating function + patchFn(n) + + newData, err := json.Marshal(n) + if err != nil { + return false, errors.Wrapf(err, "failed to marshal modified node %q into JSON", n.Name) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return false, errors.Wrap(err, "failed to create two way merge patch") + } + + if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { + // TODO also check for timeouts + if apierrors.IsConflict(err) { + fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)") + return false, nil + } + return false, errors.Wrapf(err, "error patching node %q through apiserver", n.Name) + } + + return true, nil + } +} + +// PatchNode tries to patch a node using patchFn for the actual mutating logic. +// Retries are provided by the wait package. +func PatchNode(client clientset.Interface, nodeName string, patchFn func(*v1.Node)) error { + // wait.Poll will rerun the condition function every interval function if + // the function returns false. If the condition function returns an error + // then the retries end and the error is returned. 
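+	//
+	// A sketch of the expected call shape; the label key and value are
+	// illustrative, not something this package sets by itself:
+	//
+	//	PatchNode(client, nodeName, func(n *v1.Node) {
+	//		n.Labels["openyurt.io/is-edge-worker"] = "true"
+	//	})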
+ return wait.Poll(constants.APICallRetryInterval, constants.PatchNodeTimeout, PatchNodeOnce(client, nodeName, patchFn)) +} + +// GetConfigMapWithRetry tries to retrieve a ConfigMap using the given client, +// retrying if we get an unexpected error. +// +// TODO: evaluate if this can be done better. Potentially remove the retry if feasible. +func GetConfigMapWithRetry(client clientset.Interface, namespace, name string) (*v1.ConfigMap, error) { + var cm *v1.ConfigMap + var lastError error + err := wait.ExponentialBackoff(clientsetretry.DefaultBackoff, func() (bool, error) { + var err error + cm, err = client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return true, nil + } + lastError = err + return false, nil + }) + if err == nil { + return cm, nil + } + return nil, lastError +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency_test.go new file mode 100644 index 00000000000..13879cf0edd --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/idempotency_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiclient + +import ( + "context" + "testing" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" +) + +const configMapName = "configmap" + +func TestPatchNodeNonErrorCases(t *testing.T) { + testcases := []struct { + name string + lookupName string + node v1.Node + success bool + }{ + { + name: "simple update", + lookupName: "testnode", + node: v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testnode", + Labels: map[string]string{v1.LabelHostname: ""}, + }, + }, + success: true, + }, + { + name: "node does not exist", + lookupName: "whale", + success: false, + }, + { + name: "node not labelled yet", + lookupName: "robin", + node: v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "robin", + }, + }, + success: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + client := fake.NewSimpleClientset() + _, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to create node to fake client: %v", err) + } + conditionFunction := PatchNodeOnce(client, tc.lookupName, func(node *v1.Node) { + node.Annotations = map[string]string{ + "updatedBy": "test", + } + }) + success, err := conditionFunction() + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + if success != tc.success { + t.Fatalf("expected %v got %v", tc.success, success) + } + }) + } +} + +func TestCreateOrMutateConfigMap(t *testing.T) { + client := fake.NewSimpleClientset() + err := CreateOrMutateConfigMap(client, &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: 
metav1.NamespaceSystem, + }, + Data: map[string]string{ + "key": "some-value", + }, + }, func(cm *v1.ConfigMap) error { + t.Fatal("mutate should not have been called, since the ConfigMap should have been created instead of mutated") + return nil + }) + if err != nil { + t.Fatalf("error creating ConfigMap: %v", err) + } + _, err = client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error retrieving ConfigMap: %v", err) + } +} + +func createClientAndConfigMap(t *testing.T) *fake.Clientset { + client := fake.NewSimpleClientset() + _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + "key": "some-value", + }, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating ConfigMap: %v", err) + } + return client +} + +func TestMutateConfigMap(t *testing.T) { + client := createClientAndConfigMap(t) + + err := MutateConfigMap(client, metav1.ObjectMeta{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + }, func(cm *v1.ConfigMap) error { + cm.Data["key"] = "some-other-value" + return nil + }) + if err != nil { + t.Fatalf("error mutating regular ConfigMap: %v", err) + } + + cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{}) + if cm.Data["key"] != "some-other-value" { + t.Fatalf("ConfigMap mutation was invalid, has: %q", cm.Data["key"]) + } +} + +func TestMutateConfigMapWithConflict(t *testing.T) { + client := createClientAndConfigMap(t) + + // Mimic that the first 5 updates of the ConfigMap returns a conflict, whereas the sixth update + // succeeds + conflict := 5 + client.PrependReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + if conflict > 0 { + conflict-- + return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("conflict")) + } + return false, update.GetObject(), nil + }) + + err := MutateConfigMap(client, metav1.ObjectMeta{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + }, func(cm *v1.ConfigMap) error { + cm.Data["key"] = "some-other-value" + return nil + }) + if err != nil { + t.Fatalf("error mutating conflicting ConfigMap: %v", err) + } + + cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{}) + if cm.Data["key"] != "some-other-value" { + t.Fatalf("ConfigMap mutation with conflict was invalid, has: %q", cm.Data["key"]) + } +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/tryidempotency.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/tryidempotency.go new file mode 100644 index 00000000000..36c43d2e4ea --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/tryidempotency.go @@ -0,0 +1,232 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiclient
+
+import (
+	"time"
+
+	apps "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	rbac "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
+)
+
+// CreateOrUpdateConfigMapWithTry runs CreateOrUpdateConfigMap with try.
+func CreateOrUpdateConfigMapWithTry(client clientset.Interface, cm *v1.ConfigMap) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateConfigMap(client, cm)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrRetainConfigMapWithTry runs CreateOrRetainConfigMap with try.
+func CreateOrRetainConfigMapWithTry(client clientset.Interface, cm *v1.ConfigMap, configMapName string) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrRetainConfigMap(client, cm, configMapName)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateSecretWithTry runs CreateOrUpdateSecret with try.
+func CreateOrUpdateSecretWithTry(client clientset.Interface, secret *v1.Secret) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateSecret(client, secret)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateServiceAccountWithTry runs CreateOrUpdateServiceAccount with try.
+func CreateOrUpdateServiceAccountWithTry(client clientset.Interface, sa *v1.ServiceAccount) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateServiceAccount(client, sa)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateDeploymentWithTry runs CreateOrUpdateDeployment with try.
+func CreateOrUpdateDeploymentWithTry(client clientset.Interface, deploy *apps.Deployment) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateDeployment(client, deploy)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateDaemonSetWithTry runs CreateOrUpdateDaemonSet with try.
+func CreateOrUpdateDaemonSetWithTry(client clientset.Interface, ds *apps.DaemonSet) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateDaemonSet(client, ds)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// DeleteDaemonSetForegroundWithTry runs DeleteDaemonSetForeground with try.
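+//
+// A minimal usage sketch; the namespace and name are illustrative. Because the
+// underlying delete uses foreground propagation, the call only succeeds once
+// the managed Pods are gone as well:
+//
+//	if err := DeleteDaemonSetForegroundWithTry(client, "kube-system", "yurt-tunnel-agent"); err != nil {
+//		return err
+//	}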
+func DeleteDaemonSetForegroundWithTry(client clientset.Interface, namespace, name string) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := DeleteDaemonSetForeground(client, namespace, name)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// DeleteDeploymentForegroundWithTry runs DeleteDeploymentForeground with try.
+func DeleteDeploymentForegroundWithTry(client clientset.Interface, namespace, name string) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := DeleteDeploymentForeground(client, namespace, name)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateRoleWithTry runs CreateOrUpdateRole with try.
+func CreateOrUpdateRoleWithTry(client clientset.Interface, role *rbac.Role) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateRole(client, role)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateRoleBindingWithTry runs CreateOrUpdateRoleBinding with try.
+func CreateOrUpdateRoleBindingWithTry(client clientset.Interface, roleBinding *rbac.RoleBinding) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateRoleBinding(client, roleBinding)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateClusterRoleWithTry runs CreateOrUpdateClusterRole with try.
+func CreateOrUpdateClusterRoleWithTry(client clientset.Interface, clusterRole *rbac.ClusterRole) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateClusterRole(client, clusterRole)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrUpdateClusterRoleBindingWithTry runs CreateOrUpdateClusterRoleBinding with try.
+func CreateOrUpdateClusterRoleBindingWithTry(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrUpdateClusterRoleBinding(client, clusterRoleBinding)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// CreateOrMutateConfigMapWithTry runs CreateOrMutateConfigMap with try.
+func CreateOrMutateConfigMapWithTry(client clientset.Interface, cm *v1.ConfigMap, mutator ConfigMapMutator) error {
+	backoff := getBackOff()
+
+	return wait.ExponentialBackoff(backoff, func() (bool, error) {
+		err := CreateOrMutateConfigMap(client, cm, mutator)
+		if err != nil {
+			// Retry until the timeout
+			return false, nil
+		}
+		// The last f() call was a success, return cleanly
+		return true, nil
+	})
+}
+
+// try 200 times, the interval is three seconds.
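+// In the worst case that is roughly ten minutes of blocking per call:
+//
+//	200 steps * 3s interval * factor 1.0 = 600s ≈ 10min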
+func getBackOff() wait.Backoff { + backoff := wait.Backoff{ + Duration: 3 * time.Second, + Factor: 1, + Steps: 200, + } + return backoff +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/wait.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/wait.go new file mode 100644 index 00000000000..7b5a6a31b7d --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient/wait.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiclient + +import ( + "context" + "fmt" + "io" + "net/http" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + netutil "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + + kubeadmconstants "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" +) + +// Waiter is an interface for waiting for criteria in Kubernetes to happen +type Waiter interface { + // WaitForAPI waits for the API Server's /healthz endpoint to become "ok" + WaitForAPI() error + // WaitForPodsWithLabel waits for Pods in the kube-system namespace to become Ready + WaitForPodsWithLabel(kvLabel string) error + // WaitForPodToDisappear waits for the given Pod in the kube-system namespace to be deleted + WaitForPodToDisappear(staticPodName string) error + // WaitForStaticPodSingleHash fetches sha256 hash for the control plane static pod + WaitForStaticPodSingleHash(nodeName string, component string) (string, error) + // WaitForStaticPodHashChange waits for the given static pod component's static pod hash to get updated. 
+ // By doing that we can be sure that the kubelet has restarted the given Static Pod + WaitForStaticPodHashChange(nodeName, component, previousHash string) error + // WaitForStaticPodControlPlaneHashes fetches sha256 hashes for the control plane static pods + WaitForStaticPodControlPlaneHashes(nodeName string) (map[string]string, error) + // WaitForHealthyKubelet blocks until the kubelet /healthz endpoint returns 'ok' + WaitForHealthyKubelet(initialTimeout time.Duration, healthzEndpoint string) error + // WaitForKubeletAndFunc is a wrapper for WaitForHealthyKubelet that also blocks for a function + WaitForKubeletAndFunc(f func() error) error + // SetTimeout adjusts the timeout to the specified duration + SetTimeout(timeout time.Duration) +} + +// KubeWaiter is an implementation of Waiter that is backed by a Kubernetes client +type KubeWaiter struct { + client clientset.Interface + timeout time.Duration + writer io.Writer +} + +// NewKubeWaiter returns a new Waiter object that talks to the given Kubernetes cluster +func NewKubeWaiter(client clientset.Interface, timeout time.Duration, writer io.Writer) Waiter { + return &KubeWaiter{ + client: client, + timeout: timeout, + writer: writer, + } +} + +// WaitForAPI waits for the API Server's /healthz endpoint to report "ok" +func (w *KubeWaiter) WaitForAPI() error { + start := time.Now() + return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { + healthStatus := 0 + w.client.Discovery().RESTClient().Get().AbsPath("/healthz").Do(context.TODO()).StatusCode(&healthStatus) + if healthStatus != http.StatusOK { + return false, nil + } + + fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds()) + return true, nil + }) +} + +// WaitForPodsWithLabel will lookup pods with the given label and wait until they are all +// reporting status as running. 
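+//
+// A minimal usage sketch; client, the timeout, and the label selector are all
+// illustrative values:
+//
+//	waiter := NewKubeWaiter(client, 4*time.Minute, os.Stdout)
+//	if err := waiter.WaitForPodsWithLabel("k8s-app=kube-dns"); err != nil {
+//		return err
+//	}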
+func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error { + + lastKnownPodNumber := -1 + return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { + listOpts := metav1.ListOptions{LabelSelector: kvLabel} + pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) + if err != nil { + fmt.Fprintf(w.writer, "[apiclient] Error getting Pods with label selector %q [%v]\n", kvLabel, err) + return false, nil + } + + if lastKnownPodNumber != len(pods.Items) { + fmt.Fprintf(w.writer, "[apiclient] Found %d Pods for label selector %s\n", len(pods.Items), kvLabel) + lastKnownPodNumber = len(pods.Items) + } + + if len(pods.Items) == 0 { + return false, nil + } + + for _, pod := range pods.Items { + if pod.Status.Phase != v1.PodRunning { + return false, nil + } + } + + return true, nil + }) +} + +// WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question +func (w *KubeWaiter) WaitForPodToDisappear(podName string) error { + return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { + _, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), podName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + fmt.Printf("[apiclient] The old Pod %q is now removed (which is desired)\n", podName) + return true, nil + } + return false, nil + }) +} + +// WaitForHealthyKubelet blocks until the kubelet /healthz endpoint returns 'ok' +func (w *KubeWaiter) WaitForHealthyKubelet(initialTimeout time.Duration, healthzEndpoint string) error { + time.Sleep(initialTimeout) + fmt.Printf("[kubelet-check] Initial timeout of %v passed.\n", initialTimeout) + return TryRunCommand(func() error { + client := &http.Client{Transport: netutil.SetOldTransportDefaults(&http.Transport{})} + resp, err := client.Get(healthzEndpoint) + if err != nil { + fmt.Println("[kubelet-check] It seems like the kubelet isn't running or healthy.") + fmt.Printf("[kubelet-check] The HTTP call equal to 'curl -sSL %s' failed with error: %v.\n", healthzEndpoint, err) + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Println("[kubelet-check] It seems like the kubelet isn't running or healthy.") + fmt.Printf("[kubelet-check] The HTTP call equal to 'curl -sSL %s' returned HTTP code %d\n", healthzEndpoint, resp.StatusCode) + return errors.New("the kubelet healthz endpoint is unhealthy") + } + return nil + }, 5) // a failureThreshold of five means waiting for a total of 155 seconds +} + +// WaitForKubeletAndFunc waits primarily for the function f to execute, even though it might take some time. 
If that takes a long time, and the kubelet +// /healthz continuously are unhealthy, kubeadm will error out after a period of exponential backoff +func (w *KubeWaiter) WaitForKubeletAndFunc(f func() error) error { + errorChan := make(chan error, 1) + + go func(errC chan error, waiter Waiter) { + if err := waiter.WaitForHealthyKubelet(40*time.Second, fmt.Sprintf("http://localhost:%d/healthz", kubeadmconstants.KubeletHealthzPort)); err != nil { + errC <- err + } + }(errorChan, w) + + go func(errC chan error, waiter Waiter) { + // This main goroutine sends whatever the f function returns (error or not) to the channel + // This in order to continue on success (nil error), or just fail if the function returns an error + errC <- f() + }(errorChan, w) + + // This call is blocking until one of the goroutines sends to errorChan + return <-errorChan +} + +// SetTimeout adjusts the timeout to the specified duration +func (w *KubeWaiter) SetTimeout(timeout time.Duration) { + w.timeout = timeout +} + +// WaitForStaticPodControlPlaneHashes blocks until it timeouts or gets a hash map for all components and their Static Pods +func (w *KubeWaiter) WaitForStaticPodControlPlaneHashes(nodeName string) (map[string]string, error) { + + componentHash := "" + var err error + mirrorPodHashes := map[string]string{} + for _, component := range kubeadmconstants.ControlPlaneComponents { + err = wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { + componentHash, err = getStaticPodSingleHash(w.client, nodeName, component) + if err != nil { + return false, nil + } + return true, nil + }) + if err != nil { + return nil, err + } + mirrorPodHashes[component] = componentHash + } + + return mirrorPodHashes, nil +} + +// WaitForStaticPodSingleHash blocks until it timeouts or gets a hash for a single component and its Static Pod +func (w *KubeWaiter) WaitForStaticPodSingleHash(nodeName string, component string) (string, error) { + + componentPodHash := "" + var err error + err = wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { + componentPodHash, err = getStaticPodSingleHash(w.client, nodeName, component) + if err != nil { + return false, nil + } + return true, nil + }) + + return componentPodHash, err +} + +// WaitForStaticPodHashChange blocks until it timeouts or notices that the Mirror Pod (for the Static Pod, respectively) has changed +// This implicitly means this function blocks until the kubelet has restarted the Static Pod in question +func (w *KubeWaiter) WaitForStaticPodHashChange(nodeName, component, previousHash string) error { + return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { + + hash, err := getStaticPodSingleHash(w.client, nodeName, component) + if err != nil { + return false, nil + } + // We should continue polling until the UID changes + if hash == previousHash { + return false, nil + } + + return true, nil + }) +} + +// getStaticPodSingleHash computes hashes for a single Static Pod resource +func getStaticPodSingleHash(client clientset.Interface, nodeName string, component string) (string, error) { + + staticPodName := fmt.Sprintf("%s-%s", component, nodeName) + staticPod, err := client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), staticPodName, metav1.GetOptions{}) + if err != nil { + return "", err + } + + staticPodHash := staticPod.Annotations["kubernetes.io/config.hash"] + fmt.Printf("Static pod: %s hash: %s\n", staticPodName, staticPodHash) + return 
staticPodHash, nil +} + +// TryRunCommand runs a function a maximum of failureThreshold times, and retries on error. If failureThreshold is hit; the last error is returned +func TryRunCommand(f func() error, failureThreshold int) error { + backoff := wait.Backoff{ + Duration: 5 * time.Second, + Factor: 2, // double the timeout for every failure + Steps: failureThreshold, + } + return wait.ExponentialBackoff(backoff, func() (bool, error) { + err := f() + if err != nil { + // Retry until the timeout + return false, nil + } + // The last f() call was a success, return cleanly + return true, nil + }) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/arguments.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/arguments.go new file mode 100644 index 00000000000..7f88b471a22 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/arguments.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "sort" + "strings" + + "github.com/pkg/errors" +) + +// BuildArgumentListFromMap takes two string-string maps, one with the base arguments and one +// with optional override arguments. In the return list override arguments will precede base +// arguments +func BuildArgumentListFromMap(baseArguments map[string]string, overrideArguments map[string]string) []string { + var command []string + var keys []string + + argsMap := make(map[string]string) + + for k, v := range baseArguments { + argsMap[k] = v + } + + for k, v := range overrideArguments { + argsMap[k] = v + } + + for k := range argsMap { + keys = append(keys, k) + } + + sort.Strings(keys) + for _, k := range keys { + command = append(command, fmt.Sprintf("--%s=%s", k, argsMap[k])) + } + + return command +} + +// ParseArgumentListToMap parses a CLI argument list in the form "--foo=bar" to a string-string map +func ParseArgumentListToMap(arguments []string) map[string]string { + resultingMap := map[string]string{} + for i, arg := range arguments { + key, val, err := parseArgument(arg) + + // Ignore if the first argument doesn't satisfy the criteria, it's most often the binary name + // Warn in all other cases, but don't error out. This can happen only if the user has edited the argument list by hand, so they might know what they are doing + if err != nil { + if i != 0 { + fmt.Printf("[kubeadm] WARNING: The component argument %q could not be parsed correctly. The argument must be of the form %q. Skipping...\n", arg, "--") + } + continue + } + + resultingMap[key] = val + } + return resultingMap +} + +// ReplaceArgument gets a command list; converts it to a map for easier modification, runs the provided function that +// returns a new modified map, and then converts the map back to a command string slice +func ReplaceArgument(command []string, argMutateFunc func(map[string]string) map[string]string) []string { + argMap := ParseArgumentListToMap(command) + + // Save the first command (the executable) if we're sure it's not an argument (i.e. 
no --) + var newCommand []string + if len(command) > 0 && !strings.HasPrefix(command[0], "--") { + newCommand = append(newCommand, command[0]) + } + newArgMap := argMutateFunc(argMap) + newCommand = append(newCommand, BuildArgumentListFromMap(newArgMap, map[string]string{})...) + return newCommand +} + +// parseArgument parses the argument "--foo=bar" to "foo" and "bar" +func parseArgument(arg string) (string, string, error) { + if !strings.HasPrefix(arg, "--") { + return "", "", errors.New("the argument should start with '--'") + } + if !strings.Contains(arg, "=") { + return "", "", errors.New("the argument should have a '=' between the flag and the value") + } + // Remove the starting -- + arg = strings.TrimPrefix(arg, "--") + // Split the string on =. Return only two substrings, since we want only key/value, but the value can include '=' as well + keyvalSlice := strings.SplitN(arg, "=", 2) + + // Make sure both a key and value is present + if len(keyvalSlice) != 2 { + return "", "", errors.New("the argument must have both a key and a value") + } + if len(keyvalSlice[0]) == 0 { + return "", "", errors.New("the argument must have a key") + } + + return keyvalSlice[0], keyvalSlice[1], nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/cgroupdriver.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/cgroupdriver.go new file mode 100644 index 00000000000..b7f8668b4dd --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/cgroupdriver.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "strings" + + "github.com/pkg/errors" + utilsexec "k8s.io/utils/exec" +) + +const ( + // CgroupDriverSystemd holds the systemd driver type + CgroupDriverSystemd = "systemd" + // CgroupDriverCgroupfs holds the cgroupfs driver type + CgroupDriverCgroupfs = "cgroupfs" +) + +// TODO: add support for detecting the cgroup driver for CRI other than +// Docker. Currently only Docker driver detection is supported: +// Discussion: +// https://github.com/kubernetes/kubeadm/issues/844 + +// GetCgroupDriverDocker runs 'docker info -f "{{.CgroupDriver}}"' to obtain the docker cgroup driver +func GetCgroupDriverDocker(execer utilsexec.Interface) (string, error) { + driver, err := callDockerInfo(execer) + if err != nil { + return "", err + } + return strings.TrimSuffix(driver, "\n"), nil +} + +func callDockerInfo(execer utilsexec.Interface) (string, error) { + out, err := execer.Command("docker", "info", "-f", "{{.CgroupDriver}}").Output() + if err != nil { + return "", errors.Wrap(err, "cannot execute 'docker info -f {{.CgroupDriver}}'") + } + return string(out), nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem.go new file mode 100644 index 00000000000..48e152f0a9b --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package initsystem
+
+// InitSystem is the interface that describes the behaviors of an init system
+type InitSystem interface {
+	// return a string describing how to enable a service
+	EnableCommand(service string) string
+
+	// ServiceStart tries to start a specific service
+	ServiceStart(service string) error
+
+	// ServiceStop tries to stop a specific service
+	ServiceStop(service string) error
+
+	// ServiceRestart tries to reload the environment and restart the specific service
+	ServiceRestart(service string) error
+
+	// ServiceExists ensures the service is defined for this init system.
+	ServiceExists(service string) bool
+
+	// ServiceIsEnabled ensures the service is enabled to start on each boot.
+	ServiceIsEnabled(service string) bool
+
+	// ServiceIsActive ensures the service is running, or attempting to run. (crash looping in the case of kubelet)
+	ServiceIsActive(service string) bool
+}
diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go
new file mode 100644
index 00000000000..5cbf9099e75
--- /dev/null
+++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go
@@ -0,0 +1,164 @@
+// +build !windows
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package initsystem
+
+import (
+	"fmt"
+	"os/exec"
+	"strings"
+)
+
+// OpenRCInitSystem defines openrc
+type OpenRCInitSystem struct{}
+
+// ServiceStart tries to start a specific service
+func (openrc OpenRCInitSystem) ServiceStart(service string) error {
+	args := []string{service, "start"}
+	return exec.Command("rc-service", args...).Run()
+}
+
+// ServiceStop tries to stop a specific service
+func (openrc OpenRCInitSystem) ServiceStop(service string) error {
+	args := []string{service, "stop"}
+	return exec.Command("rc-service", args...).Run()
+}
+
+// ServiceRestart tries to reload the environment and restart the specific service
+func (openrc OpenRCInitSystem) ServiceRestart(service string) error {
+	args := []string{service, "restart"}
+	return exec.Command("rc-service", args...).Run()
+}
+
+// ServiceExists ensures the service is defined for this init system.
+// openrc writes to stderr if a service is not found or not enabled;
+// this is in contrast to systemd, which only writes to stdout.
+// Hence, we use CombinedOutput and ignore the error.
+func (openrc OpenRCInitSystem) ServiceExists(service string) bool { + args := []string{service, "status"} + outBytes, _ := exec.Command("rc-service", args...).CombinedOutput() + return !strings.Contains(string(outBytes), "does not exist") +} + +// ServiceIsEnabled ensures the service is enabled to start on each boot. +func (openrc OpenRCInitSystem) ServiceIsEnabled(service string) bool { + args := []string{"show", "default"} + outBytes, _ := exec.Command("rc-update", args...).Output() + return strings.Contains(string(outBytes), service) +} + +// ServiceIsActive ensures the service is running, or attempting to run. (crash looping in the case of kubelet) +func (openrc OpenRCInitSystem) ServiceIsActive(service string) bool { + args := []string{service, "status"} + outBytes, _ := exec.Command("rc-service", args...).CombinedOutput() + outStr := string(outBytes) + return !strings.Contains(outStr, "stopped") && !strings.Contains(outStr, "does not exist") +} + +// EnableCommand return a string describing how to enable a service +func (openrc OpenRCInitSystem) EnableCommand(service string) string { + return fmt.Sprintf("rc-update add %s default", service) +} + +// SystemdInitSystem defines systemd +type SystemdInitSystem struct{} + +// EnableCommand return a string describing how to enable a service +func (sysd SystemdInitSystem) EnableCommand(service string) string { + return fmt.Sprintf("systemctl enable %s.service", service) +} + +// reloadSystemd reloads the systemd daemon +func (sysd SystemdInitSystem) reloadSystemd() error { + if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { + return fmt.Errorf("failed to reload systemd: %v", err) + } + return nil +} + +// ServiceStart tries to start a specific service +func (sysd SystemdInitSystem) ServiceStart(service string) error { + // Before we try to start any service, make sure that systemd is ready + if err := sysd.reloadSystemd(); err != nil { + return err + } + args := []string{"start", service} + return exec.Command("systemctl", args...).Run() +} + +// ServiceRestart tries to reload the environment and restart the specific service +func (sysd SystemdInitSystem) ServiceRestart(service string) error { + // Before we try to restart any service, make sure that systemd is ready + if err := sysd.reloadSystemd(); err != nil { + return err + } + args := []string{"restart", service} + return exec.Command("systemctl", args...).Run() +} + +// ServiceStop tries to stop a specific service +func (sysd SystemdInitSystem) ServiceStop(service string) error { + args := []string{"stop", service} + return exec.Command("systemctl", args...).Run() +} + +// ServiceExists ensures the service is defined for this init system. +func (sysd SystemdInitSystem) ServiceExists(service string) bool { + args := []string{"status", service} + outBytes, _ := exec.Command("systemctl", args...).Output() + output := string(outBytes) + return !strings.Contains(output, "Loaded: not-found") +} + +// ServiceIsEnabled ensures the service is enabled to start on each boot. +func (sysd SystemdInitSystem) ServiceIsEnabled(service string) bool { + args := []string{"is-enabled", service} + err := exec.Command("systemctl", args...).Run() + return err == nil +} + +// ServiceIsActive will check is the service is "active". In the case of +// crash looping services (kubelet in our case) status will return as +// "activating", so we will consider this active as well. 
+func (sysd SystemdInitSystem) ServiceIsActive(service string) bool { + args := []string{"is-active", service} + // Ignoring error here, command returns non-0 if in "activating" status: + outBytes, _ := exec.Command("systemctl", args...).Output() + output := strings.TrimSpace(string(outBytes)) + if output == "active" || output == "activating" { + return true + } + return false +} + +// GetInitSystem returns an InitSystem for the current system, or nil +// if we cannot detect a supported init system. +// This indicates we will skip init system checks, not an error. +func GetInitSystem() (InitSystem, error) { + // Assume existence of systemctl in path implies this is a systemd system: + _, err := exec.LookPath("systemctl") + if err == nil { + return &SystemdInitSystem{}, nil + } + _, err = exec.LookPath("openrc") + if err == nil { + return &OpenRCInitSystem{}, nil + } + + return nil, fmt.Errorf("no supported init system detected, skipping checking for services") +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go new file mode 100644 index 00000000000..394272ddcb3 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go @@ -0,0 +1,245 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package initsystem + +import ( + "fmt" + "time" + + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" +) + +// WindowsInitSystem is the windows implementation of InitSystem +type WindowsInitSystem struct{} + +// EnableCommand return a string describing how to enable a service +func (sysd WindowsInitSystem) EnableCommand(service string) string { + return fmt.Sprintf("Set-Service '%s' -StartupType Automatic", service) +} + +// ServiceStart tries to start a specific service +// Following Windows documentation: https://docs.microsoft.com/en-us/windows/desktop/Services/starting-a-service +func (sysd WindowsInitSystem) ServiceStart(service string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(service) + if err != nil { + return fmt.Errorf("could not access service %s: %v", service, err) + } + defer s.Close() + + // Check if service is already started + status, err := s.Query() + if err != nil { + return fmt.Errorf("could not query service %s: %v", service, err) + } + + if status.State != svc.Stopped && status.State != svc.StopPending { + return nil + } + + timeout := time.Now().Add(10 * time.Second) + for status.State != svc.Stopped { + if timeout.Before(time.Now()) { + return fmt.Errorf("timeout waiting for %s service to stop", service) + } + time.Sleep(300 * time.Millisecond) + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not retrieve %s service status: %v", service, err) + } + } + + // Start the service + err = s.Start("is", "manual-started") + if err != nil { + return fmt.Errorf("could not start service %s: %v", service, err) + } + + // Check that the start was successful + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not query service %s: %v", service, err) + } + timeout = time.Now().Add(10 * time.Second) + for status.State != svc.Running { + if timeout.Before(time.Now()) { + return fmt.Errorf("timeout waiting for %s service to start", service) + } + time.Sleep(300 * time.Millisecond) + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not retrieve %s service status: %v", service, err) + } + } + return nil +} + +// ServiceRestart tries to reload the environment and restart the specific service +func (sysd WindowsInitSystem) ServiceRestart(service string) error { + if err := sysd.ServiceStop(service); err != nil { + return fmt.Errorf("couldn't stop service %s: %v", service, err) + } + if err := sysd.ServiceStart(service); err != nil { + return fmt.Errorf("couldn't start service %s: %v", service, err) + } + + return nil +} + +// ServiceStop tries to stop a specific service +// Following Windows documentation: https://docs.microsoft.com/en-us/windows/desktop/Services/stopping-a-service +func (sysd WindowsInitSystem) ServiceStop(service string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(service) + if err != nil { + return fmt.Errorf("could not access service %s: %v", service, err) + } + defer s.Close() + + // Check if service is already stopped + status, err := s.Query() + if err != nil { + return fmt.Errorf("could not query service %s: %v", service, err) + } + + if status.State == svc.Stopped { + return nil + } + + // If StopPending, check that service eventually stops + if status.State == svc.StopPending { + timeout := time.Now().Add(10 * time.Second) + for status.State != svc.Stopped { + if timeout.Before(time.Now()) { + return fmt.Errorf("timeout 
waiting for %s service to stop", service) + } + time.Sleep(300 * time.Millisecond) + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not retrieve %s service status: %v", service, err) + } + } + return nil + } + + // Stop the service + status, err = s.Control(svc.Stop) + if err != nil { + return fmt.Errorf("could not stop service %s: %v", service, err) + } + + // Check that the stop was successful + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not query service %s: %v", service, err) + } + timeout := time.Now().Add(10 * time.Second) + for status.State != svc.Stopped { + if timeout.Before(time.Now()) { + return fmt.Errorf("timeout waiting for %s service to stop", service) + } + time.Sleep(300 * time.Millisecond) + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not retrieve %s service status: %v", service, err) + } + } + return nil +} + +// ServiceExists ensures the service is defined for this init system. +func (sysd WindowsInitSystem) ServiceExists(service string) bool { + m, err := mgr.Connect() + if err != nil { + return false + } + defer m.Disconnect() + s, err := m.OpenService(service) + if err != nil { + return false + } + defer s.Close() + + return true +} + +// ServiceIsEnabled ensures the service is enabled to start on each boot. +func (sysd WindowsInitSystem) ServiceIsEnabled(service string) bool { + m, err := mgr.Connect() + if err != nil { + return false + } + defer m.Disconnect() + + s, err := m.OpenService(service) + if err != nil { + return false + } + defer s.Close() + + c, err := s.Config() + if err != nil { + return false + } + + return c.StartType != mgr.StartDisabled +} + +// ServiceIsActive ensures the service is running, or attempting to run. (crash looping in the case of kubelet) +func (sysd WindowsInitSystem) ServiceIsActive(service string) bool { + m, err := mgr.Connect() + if err != nil { + return false + } + defer m.Disconnect() + s, err := m.OpenService(service) + if err != nil { + return false + } + defer s.Close() + + status, err := s.Query() + if err != nil { + return false + } + return status.State == svc.Running +} + +// GetInitSystem returns an InitSystem for the current system, or nil +// if we cannot detect a supported init system. +// This indicates we will skip init system checks, not an error. +func GetInitSystem() (InitSystem, error) { + m, err := mgr.Connect() + if err != nil { + return nil, fmt.Errorf("no supported init system detected: %v", err) + } + defer m.Disconnect() + return &WindowsInitSystem{}, nil +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig.go new file mode 100644 index 00000000000..fead644c60b --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig.go @@ -0,0 +1,204 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +import ( + "fmt" + "io/ioutil" + + "github.com/pkg/errors" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// CreateBasic creates a basic, general KubeConfig object that then can be extended +func CreateBasic(serverURL, clusterName, userName string, caCert []byte) *clientcmdapi.Config { + // Use the cluster and the username as the context name + contextName := fmt.Sprintf("%s@%s", userName, clusterName) + + return &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ + clusterName: { + Server: serverURL, + CertificateAuthorityData: caCert, + }, + }, + Contexts: map[string]*clientcmdapi.Context{ + contextName: { + Cluster: clusterName, + AuthInfo: userName, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{}, + CurrentContext: contextName, + } +} + +// CreateWithCerts creates a KubeConfig object with access to the API server with client certificates +func CreateWithCerts(serverURL, clusterName, userName string, caCert []byte, clientKey []byte, clientCert []byte) *clientcmdapi.Config { + config := CreateBasic(serverURL, clusterName, userName, caCert) + config.AuthInfos[userName] = &clientcmdapi.AuthInfo{ + ClientKeyData: clientKey, + ClientCertificateData: clientCert, + } + return config +} + +// CreateWithToken creates a KubeConfig object with access to the API server with a token +func CreateWithToken(serverURL, clusterName, userName string, caCert []byte, token string) *clientcmdapi.Config { + config := CreateBasic(serverURL, clusterName, userName, caCert) + config.AuthInfos[userName] = &clientcmdapi.AuthInfo{ + Token: token, + } + return config +} + +// ClientSetFromFile returns a ready-to-use client from a kubeconfig file +func ClientSetFromFile(path string) (*clientset.Clientset, error) { + config, err := clientcmd.LoadFromFile(path) + if err != nil { + return nil, errors.Wrap(err, "failed to load admin kubeconfig") + } + return ToClientSet(config) +} + +// ToClientSet converts a KubeConfig object to a client +func ToClientSet(config *clientcmdapi.Config) (*clientset.Clientset, error) { + overrides := clientcmd.ConfigOverrides{Timeout: "10s"} + clientConfig, err := clientcmd.NewDefaultClientConfig(*config, &overrides).ClientConfig() + if err != nil { + return nil, errors.Wrap(err, "failed to create API client configuration from kubeconfig") + } + + client, err := clientset.NewForConfig(clientConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to create API client") + } + return client, nil +} + +// WriteToDisk writes a KubeConfig object down to disk with mode 0600 +func WriteToDisk(filename string, kubeconfig *clientcmdapi.Config) error { + err := clientcmd.WriteToFile(*kubeconfig, filename) + if err != nil { + return err + } + + return nil +} + +// GetClusterFromKubeConfig returns the default Cluster of the specified KubeConfig +func GetClusterFromKubeConfig(config *clientcmdapi.Config) *clientcmdapi.Cluster { + // If there is an unnamed cluster object, use it + if config.Clusters[""] != nil { + return config.Clusters[""] + } + if config.Contexts[config.CurrentContext] != nil { + return config.Clusters[config.Contexts[config.CurrentContext].Cluster] + } + return nil +} + +// HasAuthenticationCredentials returns true if the current user has valid authentication credentials for +// token authentication, basic authentication or X509 authentication +func HasAuthenticationCredentials(config *clientcmdapi.Config) bool { + authInfo := 
getCurrentAuthInfo(config) + if authInfo == nil { + return false + } + + // token authentication + if len(authInfo.Token) != 0 { + return true + } + + // basic authentication + if len(authInfo.Username) != 0 && len(authInfo.Password) != 0 { + return true + } + + // X509 authentication + if (len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0) && + (len(authInfo.ClientKey) != 0 || len(authInfo.ClientKeyData) != 0) { + return true + } + + return false +} + +// EnsureAuthenticationInfoAreEmbedded check if some authentication info are provided as external key/certificate +// files, and eventually embeds such files into the kubeconfig file +func EnsureAuthenticationInfoAreEmbedded(config *clientcmdapi.Config) error { + authInfo := getCurrentAuthInfo(config) + if authInfo == nil { + return errors.New("invalid kubeconfig file. AuthInfo is not defined for the current user") + } + + if len(authInfo.ClientCertificateData) == 0 && len(authInfo.ClientCertificate) != 0 { + clientCert, err := ioutil.ReadFile(authInfo.ClientCertificate) + if err != nil { + return errors.Wrap(err, "error while reading client cert file defined in kubeconfig") + } + authInfo.ClientCertificateData = clientCert + authInfo.ClientCertificate = "" + } + if len(authInfo.ClientKeyData) == 0 && len(authInfo.ClientKey) != 0 { + clientKey, err := ioutil.ReadFile(authInfo.ClientKey) + if err != nil { + return errors.Wrap(err, "error while reading client key file defined in kubeconfig") + } + authInfo.ClientKeyData = clientKey + authInfo.ClientKey = "" + } + + return nil +} + +// EnsureCertificateAuthorityIsEmbedded check if the certificate authority is provided as an external +// file and eventually embeds it into the kubeconfig +func EnsureCertificateAuthorityIsEmbedded(cluster *clientcmdapi.Cluster) error { + if cluster == nil { + return errors.New("received nil value for Cluster") + } + + if len(cluster.CertificateAuthorityData) == 0 && len(cluster.CertificateAuthority) != 0 { + ca, err := ioutil.ReadFile(cluster.CertificateAuthority) + if err != nil { + return errors.Wrap(err, "error while reading certificate authority file defined in kubeconfig") + } + cluster.CertificateAuthorityData = ca + cluster.CertificateAuthority = "" + } + + return nil +} + +// getCurrentAuthInfo returns current authInfo, if defined +func getCurrentAuthInfo(config *clientcmdapi.Config) *clientcmdapi.AuthInfo { + if config == nil || config.CurrentContext == "" || + len(config.Contexts) == 0 || config.Contexts[config.CurrentContext] == nil { + return nil + } + user := config.Contexts[config.CurrentContext].AuthInfo + + if user == "" || len(config.AuthInfos) == 0 || config.AuthInfos[user] == nil { + return nil + } + + return config.AuthInfos[user] +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go new file mode 100644 index 00000000000..44cbbaa6795 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go @@ -0,0 +1,330 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +const ( + configOut1 = `apiVersion: v1 +clusters: +- cluster: + server: "" + name: k8s +contexts: +- context: + cluster: k8s + user: user1 + name: user1@k8s +current-context: user1@k8s +kind: Config +preferences: {} +users: +- name: user1 + user: + token: abc +` + configOut2 = `apiVersion: v1 +clusters: +- cluster: + server: localhost:8080 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: user2 + name: user2@kubernetes +current-context: user2@kubernetes +kind: Config +preferences: {} +users: +- name: user2 + user: + token: cba +` +) + +type configClient struct { + clusterName string + userName string + serverURL string + caCert []byte +} + +type configClientWithCerts struct { + clientKey []byte + clientCert []byte +} + +type configClientWithToken struct { + token string +} + +func TestCreateWithCerts(t *testing.T) { + var createBasicTest = []struct { + name string + cc configClient + ccWithCerts configClientWithCerts + expected string + }{ + {"empty config", configClient{}, configClientWithCerts{}, ""}, + {"clusterName kubernetes", configClient{clusterName: "kubernetes"}, configClientWithCerts{}, ""}, + } + for _, rt := range createBasicTest { + t.Run(rt.name, func(t *testing.T) { + cwc := CreateWithCerts( + rt.cc.serverURL, + rt.cc.clusterName, + rt.cc.userName, + rt.cc.caCert, + rt.ccWithCerts.clientKey, + rt.ccWithCerts.clientCert, + ) + if cwc.Kind != rt.expected { + t.Errorf( + "failed CreateWithCerts:\n\texpected: %s\n\t actual: %s", + rt.expected, + cwc.Kind, + ) + } + }) + } +} + +func TestCreateWithToken(t *testing.T) { + var createBasicTest = []struct { + name string + cc configClient + ccWithToken configClientWithToken + expected string + }{ + {"empty config", configClient{}, configClientWithToken{}, ""}, + {"clusterName kubernetes", configClient{clusterName: "kubernetes"}, configClientWithToken{}, ""}, + } + for _, rt := range createBasicTest { + t.Run(rt.name, func(t *testing.T) { + cwc := CreateWithToken( + rt.cc.serverURL, + rt.cc.clusterName, + rt.cc.userName, + rt.cc.caCert, + rt.ccWithToken.token, + ) + if cwc.Kind != rt.expected { + t.Errorf( + "failed CreateWithToken:\n\texpected: %s\n\t actual: %s", + rt.expected, + cwc.Kind, + ) + } + }) + } +} + +func TestWriteKubeconfigToDisk(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Couldn't create tmpdir") + } + defer os.RemoveAll(tmpdir) + + var writeConfig = []struct { + name string + cc configClient + ccWithToken configClientWithToken + expected error + file []byte + }{ + {"test1", configClient{clusterName: "k8s", userName: "user1"}, configClientWithToken{token: "abc"}, nil, []byte(configOut1)}, + {"test2", configClient{clusterName: "kubernetes", userName: "user2", serverURL: "localhost:8080"}, configClientWithToken{token: "cba"}, nil, []byte(configOut2)}, + } + for _, rt := range writeConfig { + t.Run(rt.name, func(t *testing.T) { + c := CreateWithToken( + rt.cc.serverURL, + rt.cc.clusterName, + rt.cc.userName, + rt.cc.caCert, + rt.ccWithToken.token, + ) + configPath := fmt.Sprintf("%s/etc/kubernetes/%s.conf", tmpdir, rt.name) + err := WriteToDisk(configPath, c) + if err != rt.expected { + t.Errorf( + "failed WriteToDisk with an error:\n\texpected: %s\n\t actual: %s", + rt.expected, + err, + ) + } + newFile, _ := 
ioutil.ReadFile(configPath) + if !bytes.Equal(newFile, rt.file) { + t.Errorf( + "failed WriteToDisk config write:\n\texpected: %s\n\t actual: %s", + rt.file, + newFile, + ) + } + }) + } +} + +func TestGetCurrentAuthInfo(t *testing.T) { + var testCases = []struct { + name string + config *clientcmdapi.Config + expected bool + }{ + { + name: "nil context", + config: nil, + expected: false, + }, + { + name: "no CurrentContext value", + config: &clientcmdapi.Config{}, + expected: false, + }, + { + name: "no CurrentContext object", + config: &clientcmdapi.Config{CurrentContext: "kubernetes"}, + expected: false, + }, + { + name: "CurrentContext object with bad contents", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"NOTkubernetes": {}}, + }, + expected: false, + }, + { + name: "no AuthInfo value", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {}}, + }, + expected: false, + }, + { + name: "no AuthInfo object", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + }, + expected: false, + }, + { + name: "AuthInfo object with bad contents", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{"NOTkubernetes": {}}, + }, + expected: false, + }, + { + name: "valid AuthInfo", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{"kubernetes": {}}, + }, + expected: true, + }, + } + for _, rt := range testCases { + t.Run(rt.name, func(t *testing.T) { + r := getCurrentAuthInfo(rt.config) + if rt.expected != (r != nil) { + t.Errorf( + "failed TestHasCredentials:\n\texpected: %v\n\t actual: %v", + rt.expected, + r, + ) + } + }) + } +} + +func TestHasCredentials(t *testing.T) { + var testCases = []struct { + name string + config *clientcmdapi.Config + expected bool + }{ + { + name: "no authInfo", + config: nil, + expected: false, + }, + { + name: "no credentials", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{"kubernetes": {}}, + }, + expected: false, + }, + { + name: "token authentication credentials", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{"kubernetes": {Token: "123"}}, + }, + expected: true, + }, + { + name: "basic authentication credentials", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{"kubernetes": {Username: "A", Password: "B"}}, + }, + expected: true, + }, + { + name: "X509 authentication credentials", + config: &clientcmdapi.Config{ + CurrentContext: "kubernetes", + Contexts: map[string]*clientcmdapi.Context{"kubernetes": {AuthInfo: "kubernetes"}}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{"kubernetes": {ClientKey: "A", ClientCertificate: "B"}}, + }, + expected: true, + }, + } + for _, rt := range testCases { + t.Run(rt.name, 
func(t *testing.T) { + r := HasAuthenticationCredentials(rt.config) + if rt.expected != r { + t.Errorf( + "failed TestHasCredentials:\n\texpected: %v\n\t actual: %v", + rt.expected, + r, + ) + } + }) + } +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin.go new file mode 100644 index 00000000000..fb157160d6a --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin.go @@ -0,0 +1,115 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pubkeypin provides primitives for x509 public key pinning in the +// style of RFC7469. +package pubkeypin + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "strings" + + "github.com/pkg/errors" +) + +const ( + // formatSHA256 is the prefix for pins that are full-length SHA-256 hashes encoded in base 16 (hex) + formatSHA256 = "sha256" +) + +// Set is a set of pinned x509 public keys. +type Set struct { + sha256Hashes map[string]bool +} + +// NewSet returns a new, empty PubKeyPinSet +func NewSet() *Set { + return &Set{make(map[string]bool)} +} + +// Allow adds an allowed public key hash to the Set +func (s *Set) Allow(pubKeyHashes ...string) error { + for _, pubKeyHash := range pubKeyHashes { + parts := strings.Split(pubKeyHash, ":") + if len(parts) != 2 { + return errors.New("invalid public key hash, expected \"format:value\"") + } + format, value := parts[0], parts[1] + + switch strings.ToLower(format) { + case "sha256": + return s.allowSHA256(value) + default: + return errors.Errorf("unknown hash format %q", format) + } + } + return nil +} + +// CheckAny checks if at least one certificate matches one of the public keys in the set +func (s *Set) CheckAny(certificates []*x509.Certificate) error { + var hashes []string + + for _, certificate := range certificates { + if s.checkSHA256(certificate) { + return nil + } + + hashes = append(hashes, Hash(certificate)) + } + return errors.Errorf("none of the public keys %q are pinned", strings.Join(hashes, ":")) +} + +// Empty returns true if the Set contains no pinned public keys. +func (s *Set) Empty() bool { + return len(s.sha256Hashes) == 0 +} + +// Hash calculates the SHA-256 hash of the Subject Public Key Information (SPKI) +// object in an x509 certificate (in DER encoding). It returns the full hash as a +// hex encoded string (suitable for passing to Set.Allow). 
+func Hash(certificate *x509.Certificate) string { + spkiHash := sha256.Sum256(certificate.RawSubjectPublicKeyInfo) + return formatSHA256 + ":" + strings.ToLower(hex.EncodeToString(spkiHash[:])) +} + +// allowSHA256 validates a "sha256" format hash and adds a canonical version of it into the Set +func (s *Set) allowSHA256(hash string) error { + // validate that the hash is the right length to be a full SHA-256 hash + hashLength := hex.DecodedLen(len(hash)) + if hashLength != sha256.Size { + return errors.Errorf("expected a %d byte SHA-256 hash, found %d bytes", sha256.Size, hashLength) + } + + // validate that the hash is valid hex + _, err := hex.DecodeString(hash) + if err != nil { + return err + } + + // in the end, just store the original hex string in memory (in lowercase) + s.sha256Hashes[strings.ToLower(hash)] = true + return nil +} + +// checkSHA256 returns true if the certificate's "sha256" hash is pinned in the Set +func (s *Set) checkSHA256(certificate *x509.Certificate) bool { + actualHash := sha256.Sum256(certificate.RawSubjectPublicKeyInfo) + actualHashHex := strings.ToLower(hex.EncodeToString(actualHash[:])) + return s.sha256Hashes[actualHashHex] +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin_test.go new file mode 100644 index 00000000000..8ca2c6cf776 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/pubkeypin/pubkeypin_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pubkeypin + +import ( + "crypto/x509" + "encoding/pem" + "strings" + "testing" +) + +// testCertPEM is a simple self-signed test certificate issued with the openssl CLI: +// openssl req -new -newkey rsa:2048 -days 36500 -nodes -x509 -keyout /dev/null -out test.crt +const testCertPEM = ` +-----BEGIN CERTIFICATE----- +MIIDRDCCAiygAwIBAgIJAJgVaCXvC6HkMA0GCSqGSIb3DQEBBQUAMB8xHTAbBgNV +BAMTFGt1YmVhZG0ta2V5cGlucy10ZXN0MCAXDTE3MDcwNTE3NDMxMFoYDzIxMTcw +NjExMTc0MzEwWjAfMR0wGwYDVQQDExRrdWJlYWRtLWtleXBpbnMtdGVzdDCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK0ba8mHU9UtYlzM1Own2Fk/XGjR +J4uJQvSeGLtz1hID1IA0dLwruvgLCPadXEOw/f/IWIWcmT+ZmvIHZKa/woq2iHi5 ++HLhXs7aG4tjKGLYhag1hLjBI7icqV7ovkjdGAt9pWkxEzhIYClFMXDjKpMSynu+ +YX6nZ9tic1cOkHmx2yiZdMkuriRQnpTOa7bb03OC1VfGl7gHlOAIYaj4539WCOr8 ++ACTUMJUFEHcRZ2o8a/v6F9GMK+7SC8SJUI+GuroXqlMAdhEv4lX5Co52enYaClN ++D9FJLRpBv2YfiCQdJRaiTvCBSxEFz6BN+PtP5l2Hs703ZWEkOqCByM6HV8CAwEA +AaOBgDB+MB0GA1UdDgQWBBRQgUX8MhK2rWBWQiPHWcKzoWDH5DBPBgNVHSMESDBG +gBRQgUX8MhK2rWBWQiPHWcKzoWDH5KEjpCEwHzEdMBsGA1UEAxMUa3ViZWFkbS1r +ZXlwaW5zLXRlc3SCCQCYFWgl7wuh5DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB +BQUAA4IBAQCaAUif7Pfx3X0F08cxhx8/Hdx4jcJw6MCq6iq6rsXM32ge43t8OHKC +pJW08dk58a3O1YQSMMvD6GJDAiAfXzfwcwY6j258b1ZlI9Ag0VokvhMl/XfdCsdh +AWImnL1t4hvU5jLaImUUMlYxMcSfHBGAm7WJIZ2LdEfg6YWfZh+WGbg1W7uxLxk6 +y4h5rWdNnzBHWAGf7zJ0oEDV6W6RSwNXtC0JNnLaeIUm/6xdSddJlQPwUv8YH4jX +c1vuFqTnJBPcb7W//R/GI2Paicm1cmns9NLnPR35exHxFTy+D1yxmGokpoPMdife +aH+sfuxT8xeTPb3kjzF9eJTlnEquUDLM +-----END CERTIFICATE-----` + +// expectedHash can be verified using the openssl CLI. +const expectedHash = `sha256:345959acb2c3b2feb87d281961c893f62a314207ef02599f1cc4a5fb255480b3` + +// testCert2PEM is a second test cert generated the same way as testCertPEM +const testCert2PEM = ` +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIJAN5MXZDic7qYMA0GCSqGSIb3DQEBBQUAMFkxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxEjAQBgNVBAMTCXRlc3RDZXJ0MjAgFw0xNzA3MjQxNjA0 +MDFaGA8yMTE3MDYzMDE2MDQwMVowWTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNv +bWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAG +A1UEAxMJdGVzdENlcnQyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +0brwpJYN2ytPWzRBtZSVc3dhkQlA59AzxzqeLLkano0Pxo9NIc3T/y58nnRI8uaS +I1P7BzUfJTiUEvmAtX8NggqKK4ld/gPrU+IRww1CUYS4KCkA/0d0ctPy0JwBCjD+ +b57G3rmNE8c+0jns6J96ZzNtqmv6N+ZlFBAXm1p4S+k0kGi5+hoQ6H7SYXjk2lG+ +r/8jPQEjy/NSdw1dcCA0Nc6o+hPr32927dS6J9KOhBeXNYUNdbuDDmroM9/gN2e/ +YMSA1olLeDPQ7Xvhk0PIyEDnHh83AffPCx5yM3htVRGddjIsPAVUJEL3z5leJtxe +fzyPghOhHJY0PXqznDQTcwIDAQABo4G+MIG7MB0GA1UdDgQWBBRP0IJqv/5rQ4Uf +SByl77dJeEapRDCBiwYDVR0jBIGDMIGAgBRP0IJqv/5rQ4UfSByl77dJeEapRKFd +pFswWTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoT +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAxMJdGVzdENlcnQyggkA +3kxdkOJzupgwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA0RIMHc10 +wHHPMh9UflqBgDMF7gfbOL0juJfGloAOcohWWfMZBBJ0CQKMy3xRyoK3HmbW1eeb +iATjesw7t4VEAwf7mgKAd+eTfWYB952uq5qYJ2TI28mSofEq1Wz3RmrNkC1KCBs1 +u+YMFGwyl6necV9zKCeiju4jeovI1GA38TvH7MgYln6vMJ+FbgOXj7XCpek7dQiY +KGaeSSH218mGNQaWRQw2Sm3W6cFdANoCJUph4w18s7gjtFpfV63s80hXRps+vEyv +jEQMEQpG8Ss7HGJLGLBw/xAmG0e//XS/o2dDonbGbvzToFByz8OGxjMhk6yV6hdd ++iyvsLAw/MYMSA== +-----END CERTIFICATE----- +` + +// testCert is a small helper to get a test x509.Certificate from the PEM constants +func testCert(t *testing.T, pemString string) *x509.Certificate { + // Decode the example certificate from a PEM file into a PEM block + pemBlock, _ := pem.Decode([]byte(pemString)) + if pemBlock == nil { + t.Fatal("failed to 
parse test certificate PEM") + return nil + } + + // Parse the PEM block into an x509.Certificate + result, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatalf("failed to parse test certificate: %v", err) + return nil + } + return result +} + +func TestSet(t *testing.T) { + s := NewSet() + if !s.Empty() { + t.Error("expected a new set to be empty") + return + } + err := s.Allow("xyz") + if err == nil || !s.Empty() { + t.Error("expected allowing junk to fail") + return + } + + err = s.Allow("0011223344") + if err == nil || !s.Empty() { + t.Error("expected allowing something too short to fail") + return + } + + err = s.Allow(expectedHash + expectedHash) + if err == nil || !s.Empty() { + t.Error("expected allowing something too long to fail") + return + } + + err = s.CheckAny([]*x509.Certificate{testCert(t, testCertPEM)}) + if err == nil { + t.Error("expected test cert to not be allowed (yet)") + return + } + + err = s.Allow(strings.ToUpper(expectedHash)) + if err != nil || s.Empty() { + t.Error("expected allowing uppercase expectedHash to succeed") + return + } + + err = s.CheckAny([]*x509.Certificate{testCert(t, testCertPEM)}) + if err != nil { + t.Errorf("expected test cert to be allowed, but got back: %v", err) + return + } + + err = s.CheckAny([]*x509.Certificate{testCert(t, testCert2PEM)}) + if err == nil { + t.Error("expected the second test cert to be disallowed") + return + } +} + +func TestHash(t *testing.T) { + actualHash := Hash(testCert(t, testCertPEM)) + if actualHash != expectedHash { + t.Errorf( + "failed to Hash() to the expected value\n\texpected: %q\n\t actual: %q", + expectedHash, + actualHash, + ) + } +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime.go new file mode 100644 index 00000000000..c2395741f15 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime.go @@ -0,0 +1,235 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "path/filepath" + goruntime "runtime" + "strings" + + "github.com/pkg/errors" + errorsutil "k8s.io/apimachinery/pkg/util/errors" + utilsexec "k8s.io/utils/exec" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" +) + +// ContainerRuntime is an interface for working with container runtimes +type ContainerRuntime interface { + IsDocker() bool + IsRunning() error + ListKubeContainers() ([]string, error) + RemoveContainers(containers []string) error + PullImage(image string) error + ImageExists(image string) (bool, error) +} + +// CRIRuntime is a struct that interfaces with the CRI +type CRIRuntime struct { + exec utilsexec.Interface + criSocket string +} + +// DockerRuntime is a struct that interfaces with the Docker daemon +type DockerRuntime struct { + exec utilsexec.Interface +} + +// NewContainerRuntime sets up and returns a ContainerRuntime struct +func NewContainerRuntime(execer utilsexec.Interface, criSocket string) (ContainerRuntime, error) { + var toolName string + var runtime ContainerRuntime + + if criSocket != constants.DefaultDockerCRISocket { + toolName = "crictl" + // !!! temporary work around crictl warning: + // Using "/var/run/crio/crio.sock" as endpoint is deprecated, + // please consider using full url format "unix:///var/run/crio/crio.sock" + if filepath.IsAbs(criSocket) && goruntime.GOOS != "windows" { + criSocket = "unix://" + criSocket + } + runtime = &CRIRuntime{execer, criSocket} + } else { + toolName = "docker" + runtime = &DockerRuntime{execer} + } + + if _, err := execer.LookPath(toolName); err != nil { + return nil, errors.Wrapf(err, "%s is required for container runtime", toolName) + } + + return runtime, nil +} + +// IsDocker returns true if the runtime is docker +func (runtime *CRIRuntime) IsDocker() bool { + return false +} + +// IsDocker returns true if the runtime is docker +func (runtime *DockerRuntime) IsDocker() bool { + return true +} + +// IsRunning checks if runtime is running +func (runtime *CRIRuntime) IsRunning() error { + if out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "info").CombinedOutput(); err != nil { + return errors.Wrapf(err, "container runtime is not running: output: %s, error", string(out)) + } + return nil +} + +// IsRunning checks if runtime is running +func (runtime *DockerRuntime) IsRunning() error { + if out, err := runtime.exec.Command("docker", "info").CombinedOutput(); err != nil { + return errors.Wrapf(err, "container runtime is not running: output: %s, error", string(out)) + } + return nil +} + +// ListKubeContainers lists running k8s CRI pods +func (runtime *CRIRuntime) ListKubeContainers() ([]string, error) { + out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "pods", "-q").CombinedOutput() + if err != nil { + return nil, errors.Wrapf(err, "output: %s, error", string(out)) + } + pods := []string{} + pods = append(pods, strings.Fields(string(out))...) 
+ return pods, nil +} + +// ListKubeContainers lists running k8s containers +func (runtime *DockerRuntime) ListKubeContainers() ([]string, error) { + output, err := runtime.exec.Command("docker", "ps", "-a", "--filter", "name=k8s_", "-q").CombinedOutput() + return strings.Fields(string(output)), err +} + +// RemoveContainers removes running k8s pods +func (runtime *CRIRuntime) RemoveContainers(containers []string) error { + errs := []error{} + for _, container := range containers { + out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "stopp", container).CombinedOutput() + if err != nil { + // don't stop on errors, try to remove as many containers as possible + errs = append(errs, errors.Wrapf(err, "failed to stop running pod %s: output: %s, error", container, string(out))) + } else { + out, err = runtime.exec.Command("crictl", "-r", runtime.criSocket, "rmp", container).CombinedOutput() + if err != nil { + errs = append(errs, errors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out))) + } + } + } + return errorsutil.NewAggregate(errs) +} + +// RemoveContainers removes running containers +func (runtime *DockerRuntime) RemoveContainers(containers []string) error { + errs := []error{} + for _, container := range containers { + out, err := runtime.exec.Command("docker", "stop", container).CombinedOutput() + if err != nil { + // don't stop on errors, try to remove as many containers as possible + errs = append(errs, errors.Wrapf(err, "failed to stop running container %s: output: %s, error", container, string(out))) + } else { + out, err = runtime.exec.Command("docker", "rm", "--volumes", container).CombinedOutput() + if err != nil { + errs = append(errs, errors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out))) + } + } + } + return errorsutil.NewAggregate(errs) +} + +// PullImage pulls the image +func (runtime *CRIRuntime) PullImage(image string) error { + var err error + var out []byte + for i := 0; i < constants.PullImageRetry; i++ { + out, err = runtime.exec.Command("crictl", "-r", runtime.criSocket, "pull", image).CombinedOutput() + if err == nil { + return nil + } + } + return errors.Wrapf(err, "output: %s, error", out) +} + +// PullImage pulls the image +func (runtime *DockerRuntime) PullImage(image string) error { + var err error + var out []byte + for i := 0; i < constants.PullImageRetry; i++ { + out, err = runtime.exec.Command("docker", "pull", image).CombinedOutput() + if err == nil { + return nil + } + } + return errors.Wrapf(err, "output: %s, error", out) +} + +// ImageExists checks to see if the image exists on the system +func (runtime *CRIRuntime) ImageExists(image string) (bool, error) { + err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "inspecti", image).Run() + return err == nil, nil +} + +// ImageExists checks to see if the image exists on the system +func (runtime *DockerRuntime) ImageExists(image string) (bool, error) { + err := runtime.exec.Command("docker", "inspect", image).Run() + return err == nil, nil +} + +// detectCRISocketImpl is separated out only for test purposes, DON'T call it directly, use DetectCRISocket instead +func detectCRISocketImpl(isSocket func(string) bool) (string, error) { + foundCRISockets := []string{} + knownCRISockets := []string{ + // Docker and containerd sockets are special cased below, hence not to be included here + "/var/run/crio/crio.sock", + } + + if isSocket(dockerSocket) { + // the path in dockerSocket is not CRI 
compatible, hence we should replace it with a CRI compatible socket + foundCRISockets = append(foundCRISockets, constants.DefaultDockerCRISocket) + } else if isSocket(containerdSocket) { + // Docker 18.09 gets bundled together with containerd, thus having both dockerSocket and containerdSocket present. + // For compatibility reasons, we use the containerd socket only if Docker is not detected. + foundCRISockets = append(foundCRISockets, containerdSocket) + } + + for _, socket := range knownCRISockets { + if isSocket(socket) { + foundCRISockets = append(foundCRISockets, socket) + } + } + + switch len(foundCRISockets) { + case 0: + // Fall back to Docker if no CRI is detected, we can error out later on if we need it + return constants.DefaultDockerCRISocket, nil + case 1: + // Precisely one CRI found, use that + return foundCRISockets[0], nil + default: + // Multiple CRIs installed? + return "", errors.Errorf("Found multiple CRI sockets, please use --cri-socket to select one: %s", strings.Join(foundCRISockets, ", ")) + } +} + +// DetectCRISocket uses a list of known CRI sockets to detect one. If more than one or none is discovered, an error is returned. +func DetectCRISocket() (string, error) { + return detectCRISocketImpl(isExistingSocket) +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_test.go new file mode 100644 index 00000000000..7deab15e38c --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_test.go @@ -0,0 +1,463 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "io/ioutil" + "net" + "os" + "reflect" + "runtime" + "testing" + + "github.com/pkg/errors" + "k8s.io/utils/exec" + fakeexec "k8s.io/utils/exec/testing" + + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" +) + +func TestNewContainerRuntime(t *testing.T) { + execLookPathOK := fakeexec.FakeExec{ + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/crictl", nil }, + } + execLookPathErr := fakeexec.FakeExec{ + LookPathFunc: func(cmd string) (string, error) { return "", errors.Errorf("%s not found", cmd) }, + } + cases := []struct { + name string + execer fakeexec.FakeExec + criSocket string + isDocker bool + isError bool + }{ + {"valid: default cri socket", execLookPathOK, constants.DefaultDockerCRISocket, true, false}, + {"valid: cri-o socket url", execLookPathOK, "unix:///var/run/crio/crio.sock", false, false}, + {"valid: cri-o socket path", execLookPathOK, "/var/run/crio/crio.sock", false, false}, + {"invalid: no crictl", execLookPathErr, "unix:///var/run/crio/crio.sock", false, true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runtime, err := NewContainerRuntime(&tc.execer, tc.criSocket) + if err != nil { + if !tc.isError { + t.Fatalf("unexpected NewContainerRuntime error. 
criSocket: %s, error: %v", tc.criSocket, err) + } + return // expected error occurs, impossible to test runtime further + } + if tc.isError && err == nil { + t.Fatalf("unexpected NewContainerRuntime success. criSocket: %s", tc.criSocket) + } + isDocker := runtime.IsDocker() + if tc.isDocker != isDocker { + t.Fatalf("unexpected isDocker() result %v for the criSocket %s", isDocker, tc.criSocket) + } + }) + } +} + +func genFakeActions(fcmd *fakeexec.FakeCmd, num int) []fakeexec.FakeCommandAction { + var actions []fakeexec.FakeCommandAction + for i := 0; i < num; i++ { + actions = append(actions, func(cmd string, args ...string) exec.Cmd { + return fakeexec.InitFakeCmd(fcmd, cmd, args...) + }) + } + return actions +} + +func TestIsRunning(t *testing.T) { + fcmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return nil, nil, nil }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return nil, nil, nil }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + }, + } + + criExecer := fakeexec.FakeExec{ + CommandScript: genFakeActions(&fcmd, len(fcmd.CombinedOutputScript)), + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/crictl", nil }, + } + + dockerExecer := fakeexec.FakeExec{ + CommandScript: genFakeActions(&fcmd, len(fcmd.CombinedOutputScript)), + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/docker", nil }, + } + + cases := []struct { + name string + criSocket string + execer fakeexec.FakeExec + isError bool + }{ + {"valid: CRI-O is running", "unix:///var/run/crio/crio.sock", criExecer, false}, + {"invalid: CRI-O is not running", "unix:///var/run/crio/crio.sock", criExecer, true}, + {"valid: docker is running", constants.DefaultDockerCRISocket, dockerExecer, false}, + {"invalid: docker is not running", constants.DefaultDockerCRISocket, dockerExecer, true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runtime, err := NewContainerRuntime(&tc.execer, tc.criSocket) + if err != nil { + t.Fatalf("unexpected NewContainerRuntime error: %v", err) + } + isRunning := runtime.IsRunning() + if tc.isError && isRunning == nil { + t.Error("unexpected IsRunning() success") + } + if !tc.isError && isRunning != nil { + t.Error("unexpected IsRunning() error") + } + }) + } +} + +func TestListKubeContainers(t *testing.T) { + fcmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return []byte("k8s_p1\nk8s_p2"), nil, nil }, + func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("k8s_p1\nk8s_p2"), nil, nil }, + }, + } + execer := fakeexec.FakeExec{ + CommandScript: genFakeActions(&fcmd, len(fcmd.CombinedOutputScript)), + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/crictl", nil }, + } + + cases := []struct { + name string + criSocket string + isError bool + }{ + {"valid: list containers using CRI socket url", "unix:///var/run/crio/crio.sock", false}, + {"invalid: list containers using CRI socket url", "unix:///var/run/crio/crio.sock", true}, + {"valid: list containers using docker", constants.DefaultDockerCRISocket, false}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runtime, err := NewContainerRuntime(&execer, tc.criSocket) + if err != nil { + t.Fatalf("unexpected 
NewContainerRuntime error: %v", err) + } + + containers, err := runtime.ListKubeContainers() + if tc.isError { + if err == nil { + t.Errorf("unexpected ListKubeContainers success") + } + return + } else if err != nil { + t.Errorf("unexpected ListKubeContainers error: %v", err) + } + + if !reflect.DeepEqual(containers, []string{"k8s_p1", "k8s_p2"}) { + t.Errorf("unexpected ListKubeContainers output: %v", containers) + } + }) + } +} + +func TestRemoveContainers(t *testing.T) { + fakeOK := func() ([]byte, []byte, error) { return nil, nil, nil } + fakeErr := func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} } + fcmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + fakeOK, fakeOK, fakeOK, fakeOK, fakeOK, fakeOK, // Test case 1 + fakeOK, fakeOK, fakeOK, fakeErr, fakeOK, fakeOK, + fakeErr, fakeOK, fakeOK, fakeErr, fakeOK, + fakeOK, fakeOK, fakeOK, fakeOK, fakeOK, fakeOK, + fakeOK, fakeOK, fakeOK, fakeErr, fakeOK, fakeOK, + fakeErr, fakeOK, fakeOK, fakeErr, fakeOK, + }, + } + execer := fakeexec.FakeExec{ + CommandScript: genFakeActions(&fcmd, len(fcmd.CombinedOutputScript)), + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/crictl", nil }, + } + + cases := []struct { + name string + criSocket string + containers []string + isError bool + }{ + {"valid: remove containers using CRI", "unix:///var/run/crio/crio.sock", []string{"k8s_p1", "k8s_p2", "k8s_p3"}, false}, // Test case 1 + {"invalid: CRI rmp failure", "unix:///var/run/crio/crio.sock", []string{"k8s_p1", "k8s_p2", "k8s_p3"}, true}, + {"invalid: CRI stopp failure", "unix:///var/run/crio/crio.sock", []string{"k8s_p1", "k8s_p2", "k8s_p3"}, true}, + {"valid: remove containers using docker", constants.DefaultDockerCRISocket, []string{"k8s_c1", "k8s_c2", "k8s_c3"}, false}, + {"invalid: docker rm failure", constants.DefaultDockerCRISocket, []string{"k8s_c1", "k8s_c2", "k8s_c3"}, true}, + {"invalid: docker stop failure", constants.DefaultDockerCRISocket, []string{"k8s_c1", "k8s_c2", "k8s_c3"}, true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runtime, err := NewContainerRuntime(&execer, tc.criSocket) + if err != nil { + t.Fatalf("unexpected NewContainerRuntime error: %v, criSocket: %s", err, tc.criSocket) + } + + err = runtime.RemoveContainers(tc.containers) + if !tc.isError && err != nil { + t.Errorf("unexpected RemoveContainers errors: %v, criSocket: %s, containers: %v", err, tc.criSocket, tc.containers) + } + if tc.isError && err == nil { + t.Errorf("unexpected RemoveContainers success, criSocket: %s, containers: %v", tc.criSocket, tc.containers) + } + }) + } +} + +func TestPullImage(t *testing.T) { + fcmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return nil, nil, nil }, + // If the pull fails, it will be retried 5 times (see PullImageRetry in constants/constants.go) + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return nil, nil, nil }, + // If the pull fails, it will be retried 5 
times (see PullImageRetry in constants/constants.go) + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return []byte("error"), nil, &fakeexec.FakeExitError{Status: 1} }, + }, + } + execer := fakeexec.FakeExec{ + CommandScript: genFakeActions(&fcmd, len(fcmd.CombinedOutputScript)), + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/crictl", nil }, + } + + cases := []struct { + name string + criSocket string + image string + isError bool + }{ + {"valid: pull image using CRI", "unix:///var/run/crio/crio.sock", "image1", false}, + {"invalid: CRI pull error", "unix:///var/run/crio/crio.sock", "image2", true}, + {"valid: pull image using docker", constants.DefaultDockerCRISocket, "image1", false}, + {"invalid: docker pull error", constants.DefaultDockerCRISocket, "image2", true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runtime, err := NewContainerRuntime(&execer, tc.criSocket) + if err != nil { + t.Fatalf("unexpected NewContainerRuntime error: %v, criSocket: %s", err, tc.criSocket) + } + + err = runtime.PullImage(tc.image) + if !tc.isError && err != nil { + t.Errorf("unexpected PullImage error: %v, criSocket: %s, image: %s", err, tc.criSocket, tc.image) + } + if tc.isError && err == nil { + t.Errorf("unexpected PullImage success, criSocket: %s, image: %s", tc.criSocket, tc.image) + } + }) + } +} + +func TestImageExists(t *testing.T) { + fcmd := fakeexec.FakeCmd{ + RunScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return nil, nil, nil }, + func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} }, + func() ([]byte, []byte, error) { return nil, nil, nil }, + func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} }, + }, + } + execer := fakeexec.FakeExec{ + CommandScript: genFakeActions(&fcmd, len(fcmd.RunScript)), + LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/crictl", nil }, + } + + cases := []struct { + name string + criSocket string + image string + result bool + }{ + {"valid: test if image exists using CRI", "unix:///var/run/crio/crio.sock", "image1", false}, + {"invalid: CRI inspecti failure", "unix:///var/run/crio/crio.sock", "image2", true}, + {"valid: test if image exists using docker", constants.DefaultDockerCRISocket, "image1", false}, + {"invalid: docker inspect failure", constants.DefaultDockerCRISocket, "image2", true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runtime, err := NewContainerRuntime(&execer, tc.criSocket) + if err != nil { + t.Fatalf("unexpected NewContainerRuntime error: %v, criSocket: %s", err, tc.criSocket) + } + + result, err := runtime.ImageExists(tc.image) + if !tc.result != result { + t.Errorf("unexpected ImageExists result: %t, criSocket: %s, image: %s, expected result: %t", err, tc.criSocket, tc.image, tc.result) + } + }) + } +} + +func TestIsExistingSocket(t *testing.T) { + // this test is not expected to work on Windows + if runtime.GOOS == "windows" { + return + } + + const tempPrefix = "test.kubeadm.runtime.isExistingSocket." 
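+ // The cases below exercise isExistingSocket with three inputs: a real unix
+ // domain socket (obtained by reserving a temp file name, removing the file and
+ // listening on that path with net.Listen("unix", ...)), a plain regular file,
+ // and a path that does not exist. Only the first should be reported as a socket;
+ // the temp-file dance is simply a way to get a unique, writable socket path.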
+ tests := []struct { + name string + proc func(*testing.T) + }{ + { + name: "Valid domain socket is detected as such", + proc: func(t *testing.T) { + tmpFile, err := ioutil.TempFile("", tempPrefix) + if err != nil { + t.Fatalf("unexpected error by TempFile: %v", err) + } + theSocket := tmpFile.Name() + os.Remove(theSocket) + tmpFile.Close() + + con, err := net.Listen("unix", theSocket) + if err != nil { + t.Fatalf("unexpected error while dialing a socket: %v", err) + } + defer con.Close() + + if !isExistingSocket(theSocket) { + t.Fatalf("isExistingSocket(%q) gave unexpected result. Should have been true, instead of false", theSocket) + } + }, + }, + { + name: "Regular file is not a domain socket", + proc: func(t *testing.T) { + tmpFile, err := ioutil.TempFile("", tempPrefix) + if err != nil { + t.Fatalf("unexpected error by TempFile: %v", err) + } + theSocket := tmpFile.Name() + defer os.Remove(theSocket) + tmpFile.Close() + + if isExistingSocket(theSocket) { + t.Fatalf("isExistingSocket(%q) gave unexpected result. Should have been false, instead of true", theSocket) + } + }, + }, + { + name: "Non existent socket is not a domain socket", + proc: func(t *testing.T) { + const theSocket = "/non/existent/socket" + if isExistingSocket(theSocket) { + t.Fatalf("isExistingSocket(%q) gave unexpected result. Should have been false, instead of true", theSocket) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, test.proc) + } +} + +func TestDetectCRISocketImpl(t *testing.T) { + tests := []struct { + name string + existingSockets []string + expectedError bool + expectedSocket string + }{ + { + name: "No existing sockets, use Docker", + existingSockets: []string{}, + expectedError: false, + expectedSocket: constants.DefaultDockerCRISocket, + }, + { + name: "One valid CRI socket leads to success", + existingSockets: []string{"/var/run/crio/crio.sock"}, + expectedError: false, + expectedSocket: "/var/run/crio/crio.sock", + }, + { + name: "Correct Docker CRI socket is returned", + existingSockets: []string{"/var/run/docker.sock"}, + expectedError: false, + expectedSocket: constants.DefaultDockerCRISocket, + }, + { + name: "CRI and Docker sockets lead to an error", + existingSockets: []string{ + "/var/run/docker.sock", + "/var/run/crio/crio.sock", + }, + expectedError: true, + }, + { + name: "Docker and containerd lead to Docker being used", + existingSockets: []string{ + "/var/run/docker.sock", + "/run/containerd/containerd.sock", + }, + expectedError: false, + expectedSocket: constants.DefaultDockerCRISocket, + }, + { + name: "A couple of CRI sockets lead to an error", + existingSockets: []string{ + "/var/run/crio/crio.sock", + "/run/containerd/containerd.sock", + }, + expectedError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + socket, err := detectCRISocketImpl(func(path string) bool { + for _, existing := range test.existingSockets { + if path == existing { + return true + } + } + + return false + }) + if (err != nil) != test.expectedError { + t.Fatalf("detectCRISocketImpl returned unexpected result\n\tExpected error: %t\n\tGot error: %t", test.expectedError, err != nil) + } + if !test.expectedError && socket != test.expectedSocket { + t.Fatalf("detectCRISocketImpl returned unexpected CRI socket\n\tExpected socket: %s\n\tReturned socket: %s", + test.expectedSocket, socket) + } + }) + } +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go new file 
mode 100644 index 00000000000..b15c3037313 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "os" +) + +const ( + dockerSocket = "/var/run/docker.sock" // The Docker socket is not CRI compatible + containerdSocket = "/run/containerd/containerd.sock" +) + +// isExistingSocket checks if path exists and is domain socket +func isExistingSocket(path string) bool { + fileInfo, err := os.Stat(path) + if err != nil { + return false + } + + return fileInfo.Mode()&os.ModeSocket != 0 +} diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go new file mode 100644 index 00000000000..0c6a7b496dc --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go @@ -0,0 +1,38 @@ +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + winio "github.com/Microsoft/go-winio" +) + +const ( + dockerSocket = "//./pipe/docker_engine" // The Docker socket is not CRI compatible + containerdSocket = "//./pipe/containerd-containerd" // Proposed containerd named pipe for Windows +) + +// isExistingSocket checks if path exists and is domain socket +func isExistingSocket(path string) bool { + _, err := winio.DialPipe(path, nil) + if err != nil { + return false + } + + return true +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/register.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/register.go new file mode 100644 index 00000000000..cbebd990ab9 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubelet.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeletConfiguration{}, + &SerializedNodeConfigSource{}, + ) + return nil +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/scheme/scheme.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/scheme/scheme.go new file mode 100644 index 00000000000..21deaddbe51 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/scheme/scheme.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + + kubeletconfig "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config" + kubeletconfigv1beta1 "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1" +) + +// Utility functions for the Kubelet's kubeletconfig API group + +// NewSchemeAndCodecs is a utility function that returns a Scheme and CodecFactory +// that understand the types in the kubeletconfig API group. Passing mutators allows +// for adjusting the behavior of the CodecFactory, for example enable strict decoding. +func NewSchemeAndCodecs(mutators ...serializer.CodecFactoryOptionsMutator) (*runtime.Scheme, *serializer.CodecFactory, error) { + scheme := runtime.NewScheme() + if err := kubeletconfig.AddToScheme(scheme); err != nil { + return nil, nil, err + } + if err := kubeletconfigv1beta1.AddToScheme(scheme); err != nil { + return nil, nil, err + } + codecs := serializer.NewCodecFactory(scheme, mutators...) + return scheme, &codecs, nil +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/types.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/types.go new file mode 100644 index 00000000000..818a089465d --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/types.go @@ -0,0 +1,420 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HairpinMode denotes how the kubelet should configure networking to handle +// hairpin packets. +type HairpinMode string + +// Enum settings for different ways to handle hairpin packets. +const ( + // Set the hairpin flag on the veth of containers in the respective + // container runtime. + HairpinVeth = "hairpin-veth" + // Make the container bridge promiscuous. This will force it to accept + // hairpin packets, even if the flag isn't set on ports of the bridge. + PromiscuousBridge = "promiscuous-bridge" + // Neither of the above. If the kubelet is started in this hairpin mode + // and kube-proxy is running in iptables mode, hairpin packets will be + // dropped by the container bridge. + HairpinNone = "none" +) + +// ResourceChangeDetectionStrategy denotes a mode in which internal +// managers (secret, configmap) are discovering object changes. +type ResourceChangeDetectionStrategy string + +// Enum settings for different strategies of kubelet managers. +const ( + // GetChangeDetectionStrategy is a mode in which kubelet fetches + // necessary objects directly from apiserver. + GetChangeDetectionStrategy ResourceChangeDetectionStrategy = "Get" + // TTLCacheChangeDetectionStrategy is a mode in which kubelet uses + // ttl cache for object directly fetched from apiserver. + TTLCacheChangeDetectionStrategy ResourceChangeDetectionStrategy = "Cache" + // WatchChangeDetectionStrategy is a mode in which kubelet uses + // watches to observe changes to objects that are in its interest. + WatchChangeDetectionStrategy ResourceChangeDetectionStrategy = "Watch" + // RestrictedTopologyManagerPolicy is a mode in which kubelet only allows + // pods with optimal NUMA node alignment for requested resources + RestrictedTopologyManagerPolicy = "restricted" + // BestEffortTopologyManagerPolicy is a mode in which kubelet will favour + // pods with NUMA alignment of CPU and device resources. + BestEffortTopologyManagerPolicy = "best-effort" + // NoneTopologyManager Policy is a mode in which kubelet has no knowledge + // of NUMA alignment of a pod's CPU and device resources. + NoneTopologyManagerPolicy = "none" + // SingleNumaNodeTopologyManager Policy iis a mode in which kubelet only allows + // pods with a single NUMA alignment of CPU and device resources. + SingleNumaNodeTopologyManager = "single-numa-node" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeletConfiguration contains the configuration for the Kubelet +type KubeletConfiguration struct { + metav1.TypeMeta + + // staticPodPath is the path to the directory containing local (static) pods to + // run, or the path to a single static pod file. 
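+ // Illustrative value only: kubeadm-provisioned nodes commonly point this at
+ // /etc/kubernetes/manifests; no default is applied in v1beta1/defaults.go.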
+ StaticPodPath string + // syncFrequency is the max period between synchronizing running + // containers and config + SyncFrequency metav1.Duration + // fileCheckFrequency is the duration between checking config files for + // new data + FileCheckFrequency metav1.Duration + // httpCheckFrequency is the duration between checking http for new data + HTTPCheckFrequency metav1.Duration + // staticPodURL is the URL for accessing static pods to run + StaticPodURL string + // staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL + StaticPodURLHeader map[string][]string + // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 + // for all interfaces) + Address string + // port is the port for the Kubelet to serve on. + Port int32 + // readOnlyPort is the read-only port for the Kubelet to serve on with + // no authentication/authorization (set to 0 to disable) + ReadOnlyPort int32 + // tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, + // if any, concatenated after server cert). If tlsCertFile and + // tlsPrivateKeyFile are not provided, a self-signed certificate + // and key are generated for the public address and saved to the directory + // passed to the Kubelet's --cert-dir flag. + TLSCertFile string + // tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile + TLSPrivateKeyFile string + // TLSCipherSuites is the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + TLSCipherSuites []string + // TLSMinVersion is the minimum TLS version supported. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + TLSMinVersion string + // rotateCertificates enables client certificate rotation. The Kubelet will request a + // new certificate from the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. The RotateKubeletClientCertificate feature + // must be enabled. + RotateCertificates bool + // serverTLSBootstrap enables server certificate bootstrap. Instead of self + // signing a serving certificate, the Kubelet will request a certificate from + // the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. The RotateKubeletServerCertificate feature + // must be enabled. + ServerTLSBootstrap bool + // authentication specifies how requests to the Kubelet's server are authenticated + Authentication KubeletAuthentication + // authorization specifies how requests to the Kubelet's server are authorized + Authorization KubeletAuthorization + // registryPullQPS is the limit of registry pulls per second. + // Set to 0 for no limit. + RegistryPullQPS int32 + // registryBurst is the maximum size of bursty pulls, temporarily allows + // pulls to burst to this number, while still not exceeding registryPullQPS. + // Only used if registryPullQPS > 0. + RegistryBurst int32 + // eventRecordQPS is the maximum event creations per second. If 0, there + // is no limit enforced. + EventRecordQPS int32 + // eventBurst is the maximum size of a burst of event creations, temporarily + // allows event creations to burst to this number, while still not exceeding + // eventRecordQPS. Only used if eventRecordQPS > 0. 
+ EventBurst int32 + // enableDebuggingHandlers enables server endpoints for log collection + // and local running of containers and commands + EnableDebuggingHandlers bool + // enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. + EnableContentionProfiling bool + // healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) + HealthzPort int32 + // healthzBindAddress is the IP address for the healthz server to serve on + HealthzBindAddress string + // oomScoreAdj is The oom-score-adj value for kubelet process. Values + // must be within the range [-1000, 1000]. + OOMScoreAdj int32 + // clusterDomain is the DNS domain for this cluster. If set, kubelet will + // configure all containers to search this domain in addition to the + // host's search domains. + ClusterDomain string + // clusterDNS is a list of IP addresses for a cluster DNS server. If set, + // kubelet will configure all containers to use this for DNS resolution + // instead of the host's DNS servers. + ClusterDNS []string + // streamingConnectionIdleTimeout is the maximum time a streaming connection + // can be idle before the connection is automatically closed. + StreamingConnectionIdleTimeout metav1.Duration + // nodeStatusUpdateFrequency is the frequency that kubelet computes node + // status. If node lease feature is not enabled, it is also the frequency that + // kubelet posts node status to master. In that case, be cautious when + // changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. + NodeStatusUpdateFrequency metav1.Duration + // nodeStatusReportFrequency is the frequency that kubelet posts node + // status to master if node status does not change. Kubelet will ignore this + // frequency and post node status immediately if any change is detected. It is + // only used when node lease feature is enabled. + NodeStatusReportFrequency metav1.Duration + // nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease. + NodeLeaseDurationSeconds int32 + // imageMinimumGCAge is the minimum age for an unused image before it is + // garbage collected. + ImageMinimumGCAge metav1.Duration + // imageGCHighThresholdPercent is the percent of disk usage after which + // image garbage collection is always run. The percent is calculated as + // this field value out of 100. + ImageGCHighThresholdPercent int32 + // imageGCLowThresholdPercent is the percent of disk usage before which + // image garbage collection is never run. Lowest disk usage to garbage + // collect to. The percent is calculated as this field value out of 100. + ImageGCLowThresholdPercent int32 + // How frequently to calculate and cache volume disk usage for all pods + VolumeStatsAggPeriod metav1.Duration + // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in + KubeletCgroups string + // SystemCgroups is absolute name of cgroups in which to place + // all non-kernel processes that are not already in a container. Empty + // for no container. Rolling back the flag requires a reboot. + SystemCgroups string + // CgroupRoot is the root cgroup to use for pods. + // If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy. + CgroupRoot string + // Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes + // And all Burstable and BestEffort pods are brought up under their + // specific top level QoS cgroup. 
+ CgroupsPerQOS bool + // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) + CgroupDriver string + // CPUManagerPolicy is the name of the policy to use. + // Requires the CPUManager feature gate to be enabled. + CPUManagerPolicy string + // CPU Manager reconciliation period. + // Requires the CPUManager feature gate to be enabled. + CPUManagerReconcilePeriod metav1.Duration + // TopologyManagerPolicy is the name of the policy to use. + // Policies other than "none" require the TopologyManager feature gate to be enabled. + TopologyManagerPolicy string + // Map of QoS resource reservation percentages (memory only for now). + // Requires the QOSReserved feature gate to be enabled. + QOSReserved map[string]string + // runtimeRequestTimeout is the timeout for all runtime requests except long running + // requests - pull, logs, exec and attach. + RuntimeRequestTimeout metav1.Duration + // hairpinMode specifies how the Kubelet should configure the container + // bridge for hairpin packets. + // Setting this flag allows endpoints in a Service to loadbalance back to + // themselves if they should try to access their own Service. Values: + // "promiscuous-bridge": make the container bridge promiscuous. + // "hairpin-veth": set the hairpin flag on container veth interfaces. + // "none": do nothing. + // Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT, + // because promiscuous-bridge assumes the existence of a container bridge named cbr0. + HairpinMode string + // maxPods is the number of pods that can run on this Kubelet. + MaxPods int32 + // The CIDR to use for pod IP addresses, only used in standalone mode. + // In cluster mode, this is obtained from the master. + PodCIDR string + // The maximum number of processes per pod. If -1, the kubelet defaults to the node allocatable pid capacity. + PodPidsLimit int64 + // ResolverConfig is the resolver configuration file used as the basis + // for the container DNS resolution configuration. + ResolverConfig string + // cpuCFSQuota enables CPU CFS quota enforcement for containers that + // specify CPU limits + CPUCFSQuota bool + // CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms + CPUCFSQuotaPeriod metav1.Duration + // maxOpenFiles is Number of files that can be opened by Kubelet process. + MaxOpenFiles int64 + // contentType is contentType of requests sent to apiserver. + ContentType string + // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver + KubeAPIQPS int32 + // kubeAPIBurst is the burst to allow while talking with kubernetes + // apiserver + KubeAPIBurst int32 + // serializeImagePulls when enabled, tells the Kubelet to pull images one at a time. + SerializeImagePulls bool + // Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. + EvictionHard map[string]string + // Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}. + EvictionSoft map[string]string + // Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}. + EvictionSoftGracePeriod map[string]string + // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. 
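+ // When left as the zero duration, SetDefaults_KubeletConfiguration in
+ // v1beta1/defaults.go sets this to 5m.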
+ EvictionPressureTransitionPeriod metav1.Duration + // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. + EvictionMaxPodGracePeriod int32 + // Map of signal names to quantities that defines minimum reclaims, which describe the minimum + // amount of a given resource the kubelet will reclaim when performing a pod eviction while + // that resource is under pressure. For example: {"imagefs.available": "2Gi"} + EvictionMinimumReclaim map[string]string + // podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. + // If 0, this field is ignored. + PodsPerCore int32 + // enableControllerAttachDetach enables the Attach/Detach controller to + // manage attachment/detachment of volumes scheduled to this node, and + // disables kubelet from executing any attach/detach operations + EnableControllerAttachDetach bool + // protectKernelDefaults, if true, causes the Kubelet to error if kernel + // flags are not as it expects. Otherwise the Kubelet will attempt to modify + // kernel flags to match its expectation. + ProtectKernelDefaults bool + // If true, Kubelet ensures a set of iptables rules are present on host. + // These rules will serve as utility for various components, e.g. kube-proxy. + // The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. + MakeIPTablesUtilChains bool + // iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT + // Values must be within the range [0, 31]. Must be different from other mark bits. + // Warning: Please match the value of the corresponding parameter in kube-proxy. + // TODO: clean up IPTablesMasqueradeBit in kube-proxy + IPTablesMasqueradeBit int32 + // iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. + // Values must be within the range [0, 31]. Must be different from other mark bits. + IPTablesDropBit int32 + // featureGates is a map of feature names to bools that enable or disable alpha/experimental + // features. This field modifies piecemeal the built-in default values from + // "k8s.io/kubernetes/pkg/features/kube_features.go". + FeatureGates map[string]bool + // Tells the Kubelet to fail to start if swap is enabled on the node. + FailSwapOn bool + // A quantity defines the maximum size of the container log file before it is rotated. For example: "5Mi" or "256Ki". + ContainerLogMaxSize string + // Maximum number of container log files that can be present for a container. + ContainerLogMaxFiles int32 + // ConfigMapAndSecretChangeDetectionStrategy is a mode in which config map and secret managers are running. + ConfigMapAndSecretChangeDetectionStrategy ResourceChangeDetectionStrategy + // A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in *). + // Unsafe sysctl groups are kernel.shm*, kernel.msg*, kernel.sem, fs.mqueue.*, and net.*. + // These sysctls are namespaced but not allowed by default. For example: "kernel.msg*,net.ipv4.route.min_pmtu" + // +optional + AllowedUnsafeSysctls []string + + /* the following fields are meant for Node Allocatable */ + + // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pid=100) pairs + // that describe resources reserved for non-kubernetes components. + // Currently only cpu and memory are supported. + // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. + SystemReserved map[string]string + // A set of ResourceName=ResourceQuantity (e.g. 
cpu=200m,memory=150G,pid=100) pairs + // that describe resources reserved for kubernetes system components. + // Currently cpu, memory and local ephemeral storage for root file system are supported. + // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. + KubeReserved map[string]string + // This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + SystemReservedCgroup string + // This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + KubeReservedCgroup string + // This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. + // This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + EnforceNodeAllocatable []string + // This option specifies the cpu list reserved for the host level system threads and kubernetes related threads. + // This provide a "static" CPU list rather than the "dynamic" list by system-reserved and kube-reserved. + // This option overwrites CPUs provided by system-reserved and kube-reserved. + ReservedSystemCPUs string + // The previous version for which you want to show hidden metrics. + // Only the previous minor version is meaningful, other values will not be allowed. + // The format is ., e.g.: '1.16'. + // The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, + // rather than being surprised when they are permanently removed in the release after that. + ShowHiddenMetricsForVersion string +} + +// KubeletAuthorizationMode denotes the authorization mode for the kubelet +type KubeletAuthorizationMode string + +const ( + // KubeletAuthorizationModeAlwaysAllow authorizes all authenticated requests + KubeletAuthorizationModeAlwaysAllow KubeletAuthorizationMode = "AlwaysAllow" + // KubeletAuthorizationModeWebhook uses the SubjectAccessReview API to determine authorization + KubeletAuthorizationModeWebhook KubeletAuthorizationMode = "Webhook" +) + +// KubeletAuthorization holds the state related to the authorization in the kublet. +type KubeletAuthorization struct { + // mode is the authorization mode to apply to requests to the kubelet server. + // Valid values are AlwaysAllow and Webhook. + // Webhook mode uses the SubjectAccessReview API to determine authorization. + Mode KubeletAuthorizationMode + + // webhook contains settings related to Webhook authorization. + Webhook KubeletWebhookAuthorization +} + +// KubeletWebhookAuthorization holds the state related to the Webhook +// Authorization in the Kubelet. +type KubeletWebhookAuthorization struct { + // cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. + CacheAuthorizedTTL metav1.Duration + // cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. 
+ CacheUnauthorizedTTL metav1.Duration +} + +// KubeletAuthentication holds the Kubetlet Authentication setttings. +type KubeletAuthentication struct { + // x509 contains settings related to x509 client certificate authentication + X509 KubeletX509Authentication + // webhook contains settings related to webhook bearer token authentication + Webhook KubeletWebhookAuthentication + // anonymous contains settings related to anonymous authentication + Anonymous KubeletAnonymousAuthentication +} + +// KubeletX509Authentication contains settings related to x509 client certificate authentication +type KubeletX509Authentication struct { + // clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate + // signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, + // and groups corresponding to the Organization in the client certificate. + ClientCAFile string +} + +// KubeletWebhookAuthentication contains settings related to webhook authentication +type KubeletWebhookAuthentication struct { + // enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API + Enabled bool + // cacheTTL enables caching of authentication results + CacheTTL metav1.Duration +} + +// KubeletAnonymousAuthentication enables anonymous requests to the kubetlet server. +type KubeletAnonymousAuthentication struct { + // enabled allows anonymous requests to the kubelet server. + // Requests that are not rejected by another authentication method are treated as anonymous requests. + // Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. + Enabled bool +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SerializedNodeConfigSource allows us to serialize NodeConfigSource +// This type is used internally by the Kubelet for tracking checkpointed dynamic configs. +// It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet. +type SerializedNodeConfigSource struct { + metav1.TypeMeta + // Source is the source that we are serializing + // +optional + Source v1.NodeConfigSource +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults.go new file mode 100644 index 00000000000..60b0e588cc9 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults.go @@ -0,0 +1,232 @@ +/* +Copyright 2015 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" + kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" + utilpointer "k8s.io/utils/pointer" +) + +const ( + // TODO: Move these constants to k8s.io/kubelet/config/v1beta1 instead? 
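+ // Per the IPTablesMasqueradeBit/IPTablesDropBit docs in the internal types,
+ // both fwmark bits must lie in [0, 31] and differ from other mark bits; the
+ // masquerade bit should also match the corresponding kube-proxy parameter.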
+ DefaultIPTablesMasqueradeBit = 14 + DefaultIPTablesDropBit = 15 + + KubeletOOMScoreAdj int = -999 + + // system default DNS resolver configuration + ResolvConfDefault = "/etc/resolv.conf" + + // KubeletPort is the default port for the kubelet server on each host machine. + // May be overridden by a flag at startup. + KubeletPort = 10250 +) + +var ( + zeroDuration = metav1.Duration{} + // TODO: Move these constants to k8s.io/kubelet/config/v1beta1 instead? + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + DefaultNodeAllocatableEnforcement = []string{"pods"} +) + +func addDefaultingFuncs(scheme *kruntime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfiguration) { + if obj.SyncFrequency == zeroDuration { + obj.SyncFrequency = metav1.Duration{Duration: 1 * time.Minute} + } + if obj.FileCheckFrequency == zeroDuration { + obj.FileCheckFrequency = metav1.Duration{Duration: 20 * time.Second} + } + if obj.HTTPCheckFrequency == zeroDuration { + obj.HTTPCheckFrequency = metav1.Duration{Duration: 20 * time.Second} + } + if obj.Address == "" { + obj.Address = "0.0.0.0" + } + if obj.Port == 0 { + obj.Port = KubeletPort + } + if obj.Authentication.Anonymous.Enabled == nil { + obj.Authentication.Anonymous.Enabled = utilpointer.BoolPtr(false) + } + if obj.Authentication.Webhook.Enabled == nil { + obj.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) + } + if obj.Authentication.Webhook.CacheTTL == zeroDuration { + obj.Authentication.Webhook.CacheTTL = metav1.Duration{Duration: 2 * time.Minute} + } + if obj.Authorization.Mode == "" { + obj.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook + } + if obj.Authorization.Webhook.CacheAuthorizedTTL == zeroDuration { + obj.Authorization.Webhook.CacheAuthorizedTTL = metav1.Duration{Duration: 5 * time.Minute} + } + if obj.Authorization.Webhook.CacheUnauthorizedTTL == zeroDuration { + obj.Authorization.Webhook.CacheUnauthorizedTTL = metav1.Duration{Duration: 30 * time.Second} + } + if obj.RegistryPullQPS == nil { + obj.RegistryPullQPS = utilpointer.Int32Ptr(5) + } + if obj.RegistryBurst == 0 { + obj.RegistryBurst = 10 + } + if obj.EventRecordQPS == nil { + obj.EventRecordQPS = utilpointer.Int32Ptr(5) + } + if obj.EventBurst == 0 { + obj.EventBurst = 10 + } + if obj.EnableDebuggingHandlers == nil { + obj.EnableDebuggingHandlers = utilpointer.BoolPtr(true) + } + if obj.HealthzPort == nil { + obj.HealthzPort = utilpointer.Int32Ptr(10248) + } + if obj.HealthzBindAddress == "" { + obj.HealthzBindAddress = "127.0.0.1" + } + if obj.OOMScoreAdj == nil { + obj.OOMScoreAdj = utilpointer.Int32Ptr(int32(KubeletOOMScoreAdj)) + } + if obj.StreamingConnectionIdleTimeout == zeroDuration { + obj.StreamingConnectionIdleTimeout = metav1.Duration{Duration: 4 * time.Hour} + } + if obj.NodeStatusReportFrequency == zeroDuration { + // For backward compatibility, NodeStatusReportFrequency's default value is + // set to NodeStatusUpdateFrequency if NodeStatusUpdateFrequency is set + // explicitly. 
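+ // For example: with nodeStatusUpdateFrequency set to 1m and
+ // nodeStatusReportFrequency left unset, the report frequency defaults to 1m;
+ // if both are unset, it defaults to 5m.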
+ if obj.NodeStatusUpdateFrequency == zeroDuration { + obj.NodeStatusReportFrequency = metav1.Duration{Duration: 5 * time.Minute} + } else { + obj.NodeStatusReportFrequency = obj.NodeStatusUpdateFrequency + } + } + if obj.NodeStatusUpdateFrequency == zeroDuration { + obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 10 * time.Second} + } + if obj.NodeLeaseDurationSeconds == 0 { + obj.NodeLeaseDurationSeconds = 40 + } + if obj.ImageMinimumGCAge == zeroDuration { + obj.ImageMinimumGCAge = metav1.Duration{Duration: 2 * time.Minute} + } + if obj.ImageGCHighThresholdPercent == nil { + // default is below docker's default dm.min_free_space of 90% + obj.ImageGCHighThresholdPercent = utilpointer.Int32Ptr(85) + } + if obj.ImageGCLowThresholdPercent == nil { + obj.ImageGCLowThresholdPercent = utilpointer.Int32Ptr(80) + } + if obj.VolumeStatsAggPeriod == zeroDuration { + obj.VolumeStatsAggPeriod = metav1.Duration{Duration: time.Minute} + } + if obj.CgroupsPerQOS == nil { + obj.CgroupsPerQOS = utilpointer.BoolPtr(true) + } + if obj.CgroupDriver == "" { + obj.CgroupDriver = "cgroupfs" + } + if obj.CPUManagerPolicy == "" { + obj.CPUManagerPolicy = "none" + } + if obj.CPUManagerReconcilePeriod == zeroDuration { + // Keep the same as default NodeStatusUpdateFrequency + obj.CPUManagerReconcilePeriod = metav1.Duration{Duration: 10 * time.Second} + } + if obj.TopologyManagerPolicy == "" { + obj.TopologyManagerPolicy = kubeletconfigv1beta1.NoneTopologyManagerPolicy + } + if obj.RuntimeRequestTimeout == zeroDuration { + obj.RuntimeRequestTimeout = metav1.Duration{Duration: 2 * time.Minute} + } + if obj.HairpinMode == "" { + obj.HairpinMode = kubeletconfigv1beta1.PromiscuousBridge + } + if obj.MaxPods == 0 { + obj.MaxPods = 110 + } + // default nil or negative value to -1 (implies node allocatable pid limit) + if obj.PodPidsLimit == nil || *obj.PodPidsLimit < int64(0) { + temp := int64(-1) + obj.PodPidsLimit = &temp + } + if obj.ResolverConfig == "" { + obj.ResolverConfig = ResolvConfDefault + } + if obj.CPUCFSQuota == nil { + obj.CPUCFSQuota = utilpointer.BoolPtr(true) + } + if obj.CPUCFSQuotaPeriod == nil { + obj.CPUCFSQuotaPeriod = &metav1.Duration{Duration: 100 * time.Millisecond} + } + if obj.MaxOpenFiles == 0 { + obj.MaxOpenFiles = 1000000 + } + if obj.ContentType == "" { + obj.ContentType = "application/vnd.kubernetes.protobuf" + } + if obj.KubeAPIQPS == nil { + obj.KubeAPIQPS = utilpointer.Int32Ptr(5) + } + if obj.KubeAPIBurst == 0 { + obj.KubeAPIBurst = 10 + } + if obj.SerializeImagePulls == nil { + obj.SerializeImagePulls = utilpointer.BoolPtr(true) + } + if obj.EvictionHard == nil { + obj.EvictionHard = DefaultEvictionHard + } + if obj.EvictionPressureTransitionPeriod == zeroDuration { + obj.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 5 * time.Minute} + } + if obj.EnableControllerAttachDetach == nil { + obj.EnableControllerAttachDetach = utilpointer.BoolPtr(true) + } + if obj.MakeIPTablesUtilChains == nil { + obj.MakeIPTablesUtilChains = utilpointer.BoolPtr(true) + } + if obj.IPTablesMasqueradeBit == nil { + obj.IPTablesMasqueradeBit = utilpointer.Int32Ptr(DefaultIPTablesMasqueradeBit) + } + if obj.IPTablesDropBit == nil { + obj.IPTablesDropBit = utilpointer.Int32Ptr(DefaultIPTablesDropBit) + } + if obj.FailSwapOn == nil { + obj.FailSwapOn = utilpointer.BoolPtr(true) + } + if obj.ContainerLogMaxSize == "" { + obj.ContainerLogMaxSize = "10Mi" + } + if obj.ContainerLogMaxFiles == nil { + obj.ContainerLogMaxFiles = utilpointer.Int32Ptr(5) + } + if 
obj.ConfigMapAndSecretChangeDetectionStrategy == "" { + obj.ConfigMapAndSecretChangeDetectionStrategy = kubeletconfigv1beta1.WatchChangeDetectionStrategy + } + if obj.EnforceNodeAllocatable == nil { + obj.EnforceNodeAllocatable = DefaultNodeAllocatableEnforcement + } +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go new file mode 100644 index 00000000000..7e1060a03e4 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go @@ -0,0 +1,27 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// DefaultEvictionHard includes default options for hard eviction. +var DefaultEvictionHard = map[string]string{ + "memory.available": "100Mi", + "nodefs.available": "10%", + "nodefs.inodesFree": "5%", + "imagefs.available": "15%", +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go new file mode 100644 index 00000000000..74464a3c840 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go @@ -0,0 +1,26 @@ +// +build !linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// DefaultEvictionHard includes default options for hard eviction. +var DefaultEvictionHard = map[string]string{ + "memory.available": "100Mi", + "nodefs.available": "10%", + "imagefs.available": "15%", +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/register.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/register.go new file mode 100644 index 00000000000..1b981b5791d --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" +) + +// GroupName is the group name used in this package +const GroupName = "kubelet.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +var ( + // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, + // defaulting and conversion init funcs are registered as well. + localSchemeBuilder = &kubeletconfigv1beta1.SchemeBuilder + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go new file mode 100644 index 00000000000..f9d180049f3 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -0,0 +1,564 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + v1beta1 "k8s.io/kubelet/config/v1beta1" + + config "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
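+// A minimal usage sketch (not part of the generated code): build a scheme,
+// register the functions, then convert between the versioned and internal types:
+//
+//	s := runtime.NewScheme()
+//	if err := RegisterConversions(s); err != nil { /* handle error */ }
+//	out := &config.KubeletConfiguration{}
+//	err := s.Convert(&v1beta1.KubeletConfiguration{}, out, nil)
+//
+// In this package the same registration happens automatically through
+// localSchemeBuilder in init().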
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletAnonymousAuthentication)(nil), (*config.KubeletAnonymousAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(a.(*v1beta1.KubeletAnonymousAuthentication), b.(*config.KubeletAnonymousAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeletAnonymousAuthentication)(nil), (*v1beta1.KubeletAnonymousAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(a.(*config.KubeletAnonymousAuthentication), b.(*v1beta1.KubeletAnonymousAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletAuthentication)(nil), (*config.KubeletAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(a.(*v1beta1.KubeletAuthentication), b.(*config.KubeletAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeletAuthentication)(nil), (*v1beta1.KubeletAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(a.(*config.KubeletAuthentication), b.(*v1beta1.KubeletAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletAuthorization)(nil), (*config.KubeletAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(a.(*v1beta1.KubeletAuthorization), b.(*config.KubeletAuthorization), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeletAuthorization)(nil), (*v1beta1.KubeletAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(a.(*config.KubeletAuthorization), b.(*v1beta1.KubeletAuthorization), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletConfiguration)(nil), (*config.KubeletConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(a.(*v1beta1.KubeletConfiguration), b.(*config.KubeletConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeletConfiguration)(nil), (*v1beta1.KubeletConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(a.(*config.KubeletConfiguration), b.(*v1beta1.KubeletConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletWebhookAuthentication)(nil), (*config.KubeletWebhookAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(a.(*v1beta1.KubeletWebhookAuthentication), b.(*config.KubeletWebhookAuthentication), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*config.KubeletWebhookAuthentication)(nil), (*v1beta1.KubeletWebhookAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(a.(*config.KubeletWebhookAuthentication), b.(*v1beta1.KubeletWebhookAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletWebhookAuthorization)(nil), (*config.KubeletWebhookAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(a.(*v1beta1.KubeletWebhookAuthorization), b.(*config.KubeletWebhookAuthorization), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeletWebhookAuthorization)(nil), (*v1beta1.KubeletWebhookAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(a.(*config.KubeletWebhookAuthorization), b.(*v1beta1.KubeletWebhookAuthorization), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.KubeletX509Authentication)(nil), (*config.KubeletX509Authentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(a.(*v1beta1.KubeletX509Authentication), b.(*config.KubeletX509Authentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KubeletX509Authentication)(nil), (*v1beta1.KubeletX509Authentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(a.(*config.KubeletX509Authentication), b.(*v1beta1.KubeletX509Authentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.SerializedNodeConfigSource)(nil), (*config.SerializedNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(a.(*v1beta1.SerializedNodeConfigSource), b.(*config.SerializedNodeConfigSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SerializedNodeConfigSource)(nil), (*v1beta1.SerializedNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(a.(*config.SerializedNodeConfigSource), b.(*v1beta1.SerializedNodeConfigSource), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(in *v1beta1.KubeletAnonymousAuthentication, out *config.KubeletAnonymousAuthentication, s conversion.Scope) error { + if err := v1.Convert_Pointer_bool_To_bool(&in.Enabled, &out.Enabled, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication is an autogenerated conversion function. 
+func Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(in *v1beta1.KubeletAnonymousAuthentication, out *config.KubeletAnonymousAuthentication, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(in, out, s) +} + +func autoConvert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(in *config.KubeletAnonymousAuthentication, out *v1beta1.KubeletAnonymousAuthentication, s conversion.Scope) error { + if err := v1.Convert_bool_To_Pointer_bool(&in.Enabled, &out.Enabled, s); err != nil { + return err + } + return nil +} + +// Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication is an autogenerated conversion function. +func Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(in *config.KubeletAnonymousAuthentication, out *v1beta1.KubeletAnonymousAuthentication, s conversion.Scope) error { + return autoConvert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(in, out, s) +} + +func autoConvert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(in *v1beta1.KubeletAuthentication, out *config.KubeletAuthentication, s conversion.Scope) error { + if err := Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(&in.X509, &out.X509, s); err != nil { + return err + } + if err := Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, s); err != nil { + return err + } + if err := Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication is an autogenerated conversion function. +func Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(in *v1beta1.KubeletAuthentication, out *config.KubeletAuthentication, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(in, out, s) +} + +func autoConvert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(in *config.KubeletAuthentication, out *v1beta1.KubeletAuthentication, s conversion.Scope) error { + if err := Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(&in.X509, &out.X509, s); err != nil { + return err + } + if err := Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, s); err != nil { + return err + } + if err := Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, s); err != nil { + return err + } + return nil +} + +// Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication is an autogenerated conversion function. 
+func Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(in *config.KubeletAuthentication, out *v1beta1.KubeletAuthentication, s conversion.Scope) error { + return autoConvert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(in, out, s) +} + +func autoConvert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(in *v1beta1.KubeletAuthorization, out *config.KubeletAuthorization, s conversion.Scope) error { + out.Mode = config.KubeletAuthorizationMode(in.Mode) + if err := Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(&in.Webhook, &out.Webhook, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization is an autogenerated conversion function. +func Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(in *v1beta1.KubeletAuthorization, out *config.KubeletAuthorization, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(in, out, s) +} + +func autoConvert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(in *config.KubeletAuthorization, out *v1beta1.KubeletAuthorization, s conversion.Scope) error { + out.Mode = v1beta1.KubeletAuthorizationMode(in.Mode) + if err := Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(&in.Webhook, &out.Webhook, s); err != nil { + return err + } + return nil +} + +// Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization is an autogenerated conversion function. +func Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(in *config.KubeletAuthorization, out *v1beta1.KubeletAuthorization, s conversion.Scope) error { + return autoConvert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(in, out, s) +} + +func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in *v1beta1.KubeletConfiguration, out *config.KubeletConfiguration, s conversion.Scope) error { + out.StaticPodPath = in.StaticPodPath + out.SyncFrequency = in.SyncFrequency + out.FileCheckFrequency = in.FileCheckFrequency + out.HTTPCheckFrequency = in.HTTPCheckFrequency + out.StaticPodURL = in.StaticPodURL + out.StaticPodURLHeader = *(*map[string][]string)(unsafe.Pointer(&in.StaticPodURLHeader)) + out.Address = in.Address + out.Port = in.Port + out.ReadOnlyPort = in.ReadOnlyPort + out.TLSCertFile = in.TLSCertFile + out.TLSPrivateKeyFile = in.TLSPrivateKeyFile + out.TLSCipherSuites = *(*[]string)(unsafe.Pointer(&in.TLSCipherSuites)) + out.TLSMinVersion = in.TLSMinVersion + out.RotateCertificates = in.RotateCertificates + out.ServerTLSBootstrap = in.ServerTLSBootstrap + if err := Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil { + return err + } + if err := Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int32_To_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil { + return err + } + out.RegistryBurst = in.RegistryBurst + if err := v1.Convert_Pointer_int32_To_int32(&in.EventRecordQPS, &out.EventRecordQPS, s); err != nil { + return err + } + out.EventBurst = in.EventBurst + if err := v1.Convert_Pointer_bool_To_bool(&in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers, s); err != nil { + return err + } + out.EnableContentionProfiling = in.EnableContentionProfiling + if err := 
v1.Convert_Pointer_int32_To_int32(&in.HealthzPort, &out.HealthzPort, s); err != nil { + return err + } + out.HealthzBindAddress = in.HealthzBindAddress + if err := v1.Convert_Pointer_int32_To_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil { + return err + } + out.ClusterDomain = in.ClusterDomain + out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS)) + out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency + out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds + out.ImageMinimumGCAge = in.ImageMinimumGCAge + if err := v1.Convert_Pointer_int32_To_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int32_To_int32(&in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent, s); err != nil { + return err + } + out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod + out.KubeletCgroups = in.KubeletCgroups + out.SystemCgroups = in.SystemCgroups + out.CgroupRoot = in.CgroupRoot + if err := v1.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil { + return err + } + out.CgroupDriver = in.CgroupDriver + out.CPUManagerPolicy = in.CPUManagerPolicy + out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod + out.TopologyManagerPolicy = in.TopologyManagerPolicy + out.QOSReserved = *(*map[string]string)(unsafe.Pointer(&in.QOSReserved)) + out.RuntimeRequestTimeout = in.RuntimeRequestTimeout + out.HairpinMode = in.HairpinMode + out.MaxPods = in.MaxPods + out.PodCIDR = in.PodCIDR + if err := v1.Convert_Pointer_int64_To_int64(&in.PodPidsLimit, &out.PodPidsLimit, s); err != nil { + return err + } + out.ResolverConfig = in.ResolverConfig + if err := v1.Convert_Pointer_bool_To_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil { + return err + } + if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod, s); err != nil { + return err + } + out.MaxOpenFiles = in.MaxOpenFiles + out.ContentType = in.ContentType + if err := v1.Convert_Pointer_int32_To_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil { + return err + } + out.KubeAPIBurst = in.KubeAPIBurst + if err := v1.Convert_Pointer_bool_To_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil { + return err + } + out.EvictionHard = *(*map[string]string)(unsafe.Pointer(&in.EvictionHard)) + out.EvictionSoft = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoft)) + out.EvictionSoftGracePeriod = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoftGracePeriod)) + out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod + out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod + out.EvictionMinimumReclaim = *(*map[string]string)(unsafe.Pointer(&in.EvictionMinimumReclaim)) + out.PodsPerCore = in.PodsPerCore + if err := v1.Convert_Pointer_bool_To_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil { + return err + } + out.ProtectKernelDefaults = in.ProtectKernelDefaults + if err := v1.Convert_Pointer_bool_To_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int32_To_int32(&in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit, s); err != nil { + return err + } + if err := v1.Convert_Pointer_int32_To_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil { + return err + } + 
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + if err := v1.Convert_Pointer_bool_To_bool(&in.FailSwapOn, &out.FailSwapOn, s); err != nil { + return err + } + out.ContainerLogMaxSize = in.ContainerLogMaxSize + if err := v1.Convert_Pointer_int32_To_int32(&in.ContainerLogMaxFiles, &out.ContainerLogMaxFiles, s); err != nil { + return err + } + out.ConfigMapAndSecretChangeDetectionStrategy = config.ResourceChangeDetectionStrategy(in.ConfigMapAndSecretChangeDetectionStrategy) + out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) + out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) + out.ReservedSystemCPUs = in.ReservedSystemCPUs + out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion + out.SystemReservedCgroup = in.SystemReservedCgroup + out.KubeReservedCgroup = in.KubeReservedCgroup + out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable)) + out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls)) + return nil +} + +// Convert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration is an autogenerated conversion function. +func Convert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in *v1beta1.KubeletConfiguration, out *config.KubeletConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in, out, s) +} + +func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in *config.KubeletConfiguration, out *v1beta1.KubeletConfiguration, s conversion.Scope) error { + out.StaticPodPath = in.StaticPodPath + out.SyncFrequency = in.SyncFrequency + out.FileCheckFrequency = in.FileCheckFrequency + out.HTTPCheckFrequency = in.HTTPCheckFrequency + out.StaticPodURL = in.StaticPodURL + out.StaticPodURLHeader = *(*map[string][]string)(unsafe.Pointer(&in.StaticPodURLHeader)) + out.Address = in.Address + out.Port = in.Port + out.ReadOnlyPort = in.ReadOnlyPort + out.TLSCertFile = in.TLSCertFile + out.TLSPrivateKeyFile = in.TLSPrivateKeyFile + out.TLSCipherSuites = *(*[]string)(unsafe.Pointer(&in.TLSCipherSuites)) + out.TLSMinVersion = in.TLSMinVersion + out.RotateCertificates = in.RotateCertificates + out.ServerTLSBootstrap = in.ServerTLSBootstrap + if err := Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil { + return err + } + if err := Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { + return err + } + if err := v1.Convert_int32_To_Pointer_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil { + return err + } + out.RegistryBurst = in.RegistryBurst + if err := v1.Convert_int32_To_Pointer_int32(&in.EventRecordQPS, &out.EventRecordQPS, s); err != nil { + return err + } + out.EventBurst = in.EventBurst + if err := v1.Convert_bool_To_Pointer_bool(&in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers, s); err != nil { + return err + } + out.EnableContentionProfiling = in.EnableContentionProfiling + if err := v1.Convert_int32_To_Pointer_int32(&in.HealthzPort, &out.HealthzPort, s); err != nil { + return err + } + out.HealthzBindAddress = in.HealthzBindAddress + if err := v1.Convert_int32_To_Pointer_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil { + return err + } + out.ClusterDomain = in.ClusterDomain + out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS)) + out.StreamingConnectionIdleTimeout = 
in.StreamingConnectionIdleTimeout + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency + out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds + out.ImageMinimumGCAge = in.ImageMinimumGCAge + if err := v1.Convert_int32_To_Pointer_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { + return err + } + if err := v1.Convert_int32_To_Pointer_int32(&in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent, s); err != nil { + return err + } + out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod + out.KubeletCgroups = in.KubeletCgroups + out.SystemCgroups = in.SystemCgroups + out.CgroupRoot = in.CgroupRoot + if err := v1.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil { + return err + } + out.CgroupDriver = in.CgroupDriver + out.CPUManagerPolicy = in.CPUManagerPolicy + out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod + out.TopologyManagerPolicy = in.TopologyManagerPolicy + out.QOSReserved = *(*map[string]string)(unsafe.Pointer(&in.QOSReserved)) + out.RuntimeRequestTimeout = in.RuntimeRequestTimeout + out.HairpinMode = in.HairpinMode + out.MaxPods = in.MaxPods + out.PodCIDR = in.PodCIDR + if err := v1.Convert_int64_To_Pointer_int64(&in.PodPidsLimit, &out.PodPidsLimit, s); err != nil { + return err + } + out.ResolverConfig = in.ResolverConfig + if err := v1.Convert_bool_To_Pointer_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil { + return err + } + if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod, s); err != nil { + return err + } + out.MaxOpenFiles = in.MaxOpenFiles + out.ContentType = in.ContentType + if err := v1.Convert_int32_To_Pointer_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil { + return err + } + out.KubeAPIBurst = in.KubeAPIBurst + if err := v1.Convert_bool_To_Pointer_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil { + return err + } + out.EvictionHard = *(*map[string]string)(unsafe.Pointer(&in.EvictionHard)) + out.EvictionSoft = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoft)) + out.EvictionSoftGracePeriod = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoftGracePeriod)) + out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod + out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod + out.EvictionMinimumReclaim = *(*map[string]string)(unsafe.Pointer(&in.EvictionMinimumReclaim)) + out.PodsPerCore = in.PodsPerCore + if err := v1.Convert_bool_To_Pointer_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil { + return err + } + out.ProtectKernelDefaults = in.ProtectKernelDefaults + if err := v1.Convert_bool_To_Pointer_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil { + return err + } + if err := v1.Convert_int32_To_Pointer_int32(&in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit, s); err != nil { + return err + } + if err := v1.Convert_int32_To_Pointer_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil { + return err + } + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + if err := v1.Convert_bool_To_Pointer_bool(&in.FailSwapOn, &out.FailSwapOn, s); err != nil { + return err + } + out.ContainerLogMaxSize = in.ContainerLogMaxSize + if err := v1.Convert_int32_To_Pointer_int32(&in.ContainerLogMaxFiles, &out.ContainerLogMaxFiles, s); err != nil { + return err + } + 
out.ConfigMapAndSecretChangeDetectionStrategy = v1beta1.ResourceChangeDetectionStrategy(in.ConfigMapAndSecretChangeDetectionStrategy) + out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls)) + out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) + out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) + out.SystemReservedCgroup = in.SystemReservedCgroup + out.KubeReservedCgroup = in.KubeReservedCgroup + out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable)) + out.ReservedSystemCPUs = in.ReservedSystemCPUs + out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion + return nil +} + +// Convert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration is an autogenerated conversion function. +func Convert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in *config.KubeletConfiguration, out *v1beta1.KubeletConfiguration, s conversion.Scope) error { + return autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in, out, s) +} + +func autoConvert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(in *v1beta1.KubeletWebhookAuthentication, out *config.KubeletWebhookAuthentication, s conversion.Scope) error { + if err := v1.Convert_Pointer_bool_To_bool(&in.Enabled, &out.Enabled, s); err != nil { + return err + } + out.CacheTTL = in.CacheTTL + return nil +} + +// Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication is an autogenerated conversion function. +func Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(in *v1beta1.KubeletWebhookAuthentication, out *config.KubeletWebhookAuthentication, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(in, out, s) +} + +func autoConvert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(in *config.KubeletWebhookAuthentication, out *v1beta1.KubeletWebhookAuthentication, s conversion.Scope) error { + if err := v1.Convert_bool_To_Pointer_bool(&in.Enabled, &out.Enabled, s); err != nil { + return err + } + out.CacheTTL = in.CacheTTL + return nil +} + +// Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication is an autogenerated conversion function. +func Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(in *config.KubeletWebhookAuthentication, out *v1beta1.KubeletWebhookAuthentication, s conversion.Scope) error { + return autoConvert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(in, out, s) +} + +func autoConvert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(in *v1beta1.KubeletWebhookAuthorization, out *config.KubeletWebhookAuthorization, s conversion.Scope) error { + out.CacheAuthorizedTTL = in.CacheAuthorizedTTL + out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL + return nil +} + +// Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization is an autogenerated conversion function. 
+func Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(in *v1beta1.KubeletWebhookAuthorization, out *config.KubeletWebhookAuthorization, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(in, out, s) +} + +func autoConvert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(in *config.KubeletWebhookAuthorization, out *v1beta1.KubeletWebhookAuthorization, s conversion.Scope) error { + out.CacheAuthorizedTTL = in.CacheAuthorizedTTL + out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL + return nil +} + +// Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization is an autogenerated conversion function. +func Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(in *config.KubeletWebhookAuthorization, out *v1beta1.KubeletWebhookAuthorization, s conversion.Scope) error { + return autoConvert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(in, out, s) +} + +func autoConvert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(in *v1beta1.KubeletX509Authentication, out *config.KubeletX509Authentication, s conversion.Scope) error { + out.ClientCAFile = in.ClientCAFile + return nil +} + +// Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication is an autogenerated conversion function. +func Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(in *v1beta1.KubeletX509Authentication, out *config.KubeletX509Authentication, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(in, out, s) +} + +func autoConvert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(in *config.KubeletX509Authentication, out *v1beta1.KubeletX509Authentication, s conversion.Scope) error { + out.ClientCAFile = in.ClientCAFile + return nil +} + +// Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication is an autogenerated conversion function. +func Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(in *config.KubeletX509Authentication, out *v1beta1.KubeletX509Authentication, s conversion.Scope) error { + return autoConvert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(in, out, s) +} + +func autoConvert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(in *v1beta1.SerializedNodeConfigSource, out *config.SerializedNodeConfigSource, s conversion.Scope) error { + out.Source = in.Source + return nil +} + +// Convert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource is an autogenerated conversion function. +func Convert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(in *v1beta1.SerializedNodeConfigSource, out *config.SerializedNodeConfigSource, s conversion.Scope) error { + return autoConvert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(in, out, s) +} + +func autoConvert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in *config.SerializedNodeConfigSource, out *v1beta1.SerializedNodeConfigSource, s conversion.Scope) error { + out.Source = in.Source + return nil +} + +// Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource is an autogenerated conversion function. 
+func Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in *config.SerializedNodeConfigSource, out *v1beta1.SerializedNodeConfigSource, s conversion.Scope) error { + return autoConvert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in, out, s) +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..016d58c856a --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,21 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go new file mode 100644 index 00000000000..7c127d46e2f --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go @@ -0,0 +1,38 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1beta1 "k8s.io/kubelet/config/v1beta1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1beta1.KubeletConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeletConfiguration(obj.(*v1beta1.KubeletConfiguration)) }) + return nil +} + +func SetObjectDefaults_KubeletConfiguration(in *v1beta1.KubeletConfiguration) { + SetDefaults_KubeletConfiguration(in) +} diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go new file mode 100644 index 00000000000..35acde4a0d6 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,284 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAnonymousAuthentication. +func (in *KubeletAnonymousAuthentication) DeepCopy() *KubeletAnonymousAuthentication { + if in == nil { + return nil + } + out := new(KubeletAnonymousAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletAuthentication) DeepCopyInto(out *KubeletAuthentication) { + *out = *in + out.X509 = in.X509 + out.Webhook = in.Webhook + out.Anonymous = in.Anonymous + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthentication. +func (in *KubeletAuthentication) DeepCopy() *KubeletAuthentication { + if in == nil { + return nil + } + out := new(KubeletAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletAuthorization) DeepCopyInto(out *KubeletAuthorization) { + *out = *in + out.Webhook = in.Webhook + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthorization. +func (in *KubeletAuthorization) DeepCopy() *KubeletAuthorization { + if in == nil { + return nil + } + out := new(KubeletAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.SyncFrequency = in.SyncFrequency + out.FileCheckFrequency = in.FileCheckFrequency + out.HTTPCheckFrequency = in.HTTPCheckFrequency + if in.StaticPodURLHeader != nil { + in, out := &in.StaticPodURLHeader, &out.StaticPodURLHeader + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.TLSCipherSuites != nil { + in, out := &in.TLSCipherSuites, &out.TLSCipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Authentication = in.Authentication + out.Authorization = in.Authorization + if in.ClusterDNS != nil { + in, out := &in.ClusterDNS, &out.ClusterDNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency + out.ImageMinimumGCAge = in.ImageMinimumGCAge + out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod + out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod + if in.QOSReserved != nil { + in, out := &in.QOSReserved, &out.QOSReserved + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RuntimeRequestTimeout = in.RuntimeRequestTimeout + out.CPUCFSQuotaPeriod = in.CPUCFSQuotaPeriod + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SystemReserved != nil { + in, out := &in.SystemReserved, &out.SystemReserved + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeReserved != nil { + in, out := &in.KubeReserved, &out.KubeReserved + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EnforceNodeAllocatable != nil { + in, out := &in.EnforceNodeAllocatable, &out.EnforceNodeAllocatable + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfiguration. 
+func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration { + if in == nil { + return nil + } + out := new(KubeletConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeletConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletWebhookAuthentication) DeepCopyInto(out *KubeletWebhookAuthentication) { + *out = *in + out.CacheTTL = in.CacheTTL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthentication. +func (in *KubeletWebhookAuthentication) DeepCopy() *KubeletWebhookAuthentication { + if in == nil { + return nil + } + out := new(KubeletWebhookAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletWebhookAuthorization) DeepCopyInto(out *KubeletWebhookAuthorization) { + *out = *in + out.CacheAuthorizedTTL = in.CacheAuthorizedTTL + out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthorization. +func (in *KubeletWebhookAuthorization) DeepCopy() *KubeletWebhookAuthorization { + if in == nil { + return nil + } + out := new(KubeletWebhookAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletX509Authentication) DeepCopyInto(out *KubeletX509Authentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletX509Authentication. +func (in *KubeletX509Authentication) DeepCopy() *KubeletX509Authentication { + if in == nil { + return nil + } + out := new(KubeletX509Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedNodeConfigSource) DeepCopyInto(out *SerializedNodeConfigSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedNodeConfigSource. +func (in *SerializedNodeConfigSource) DeepCopy() *SerializedNodeConfigSource { + if in == nil { + return nil + } + out := new(SerializedNodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go b/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go new file mode 100644 index 00000000000..91dec5e7346 --- /dev/null +++ b/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. +Copyright 2021 The OpenYurt Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codec + +import ( + "fmt" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/component-base/codec" + "k8s.io/klog/v2" + kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" + + kubeletconfig "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubelet/apis/config/scheme" +) + +// EncodeKubeletConfig encodes an internal KubeletConfiguration to an external YAML representation. +func EncodeKubeletConfig(internal *kubeletconfig.KubeletConfiguration, targetVersion schema.GroupVersion) ([]byte, error) { + encoder, err := NewKubeletconfigYAMLEncoder(targetVersion) + if err != nil { + return nil, err + } + // encoder will convert to external version + data, err := runtime.Encode(encoder, internal) + if err != nil { + return nil, err + } + return data, nil +} + +// NewKubeletconfigYAMLEncoder returns an encoder that can write objects in the kubeletconfig API group to YAML. +func NewKubeletconfigYAMLEncoder(targetVersion schema.GroupVersion) (runtime.Encoder, error) { + _, codecs, err := scheme.NewSchemeAndCodecs() + if err != nil { + return nil, err + } + mediaType := "application/yaml" + info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unsupported media type %q", mediaType) + } + return codecs.EncoderForVersion(info.Serializer, targetVersion), nil +} + +// DecodeKubeletConfiguration decodes a serialized KubeletConfiguration to the internal type. +func DecodeKubeletConfiguration(kubeletCodecs *serializer.CodecFactory, data []byte) (*kubeletconfig.KubeletConfiguration, error) { + var ( + obj runtime.Object + gvk *schema.GroupVersionKind + ) + + // The UniversalDecoder runs defaulting and returns the internal type by default. + obj, gvk, err := kubeletCodecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + // Try strict decoding first. If that fails decode with a lenient + // decoder, which has only v1beta1 registered, and log a warning. + // The lenient path is to be dropped when support for v1beta1 is dropped. + if !runtime.IsStrictDecodingError(err) { + return nil, errors.Wrap(err, "failed to decode") + } + + var lenientErr error + _, lenientCodecs, lenientErr := codec.NewLenientSchemeAndCodecs( + kubeletconfig.AddToScheme, + kubeletconfigv1beta1.AddToScheme, + ) + + if lenientErr != nil { + return nil, lenientErr + } + + obj, gvk, lenientErr = lenientCodecs.UniversalDecoder().Decode(data, nil, nil) + if lenientErr != nil { + // Lenient decoding failed with the current version, return the + // original strict error. + return nil, fmt.Errorf("failed lenient decoding: %v", err) + } + // Continue with the v1beta1 object that was decoded leniently, but emit a warning. 
+ klog.Warningf("using lenient decoding as strict decoding failed: %v", err) + } + + internalKC, ok := obj.(*kubeletconfig.KubeletConfiguration) + if !ok { + return nil, fmt.Errorf("failed to cast object to KubeletConfiguration, unexpected type: %v", gvk) + } + + return internalKC, nil +} diff --git a/pkg/yurtctl/util/edgenode/common.go b/pkg/yurtctl/util/edgenode/common.go index 384f632e2ed..c40b68f7d17 100644 --- a/pkg/yurtctl/util/edgenode/common.go +++ b/pkg/yurtctl/util/edgenode/common.go @@ -78,7 +78,7 @@ spec: type: Directory containers: - name: yurt-hub - image: __yurthub_image__ + image: {{.image}} imagePullPolicy: IfNotPresent volumeMounts: - name: hub-dir @@ -90,10 +90,13 @@ spec: command: - yurthub - --v=2 - - --server-addr=__kubernetes_service_addr__ + - --server-addr={{.kubernetesServerAddr}} - --node-name=$(NODE_NAME) - - --join-token=__join_token__ - - --working-mode=__working_mode__ + - --join-token={{.joinToken}} + - --working-mode={{.workingMode}} + {{if .organizations }} + - --hub-cert-organizations={{.organizations}} + {{end}} livenessProbe: httpGet: host: 127.0.0.1 diff --git a/pkg/yurtctl/util/kubernetes/util.go b/pkg/yurtctl/util/kubernetes/util.go index 440adf08d67..278e71781dd 100644 --- a/pkg/yurtctl/util/kubernetes/util.go +++ b/pkg/yurtctl/util/kubernetes/util.go @@ -17,6 +17,7 @@ limitations under the License. package kubernetes import ( + "bufio" "bytes" "context" "errors" @@ -32,6 +33,7 @@ import ( "sync" "time" + pkgerrors "github.com/pkg/errors" "github.com/spf13/pflag" "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" @@ -53,14 +55,19 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/util/homedir" bootstrapapi "k8s.io/cluster-bootstrap/token/api" bootstraputil "k8s.io/cluster-bootstrap/token/util" "k8s.io/klog/v2" "github.com/openyurtio/openyurt/pkg/projectinfo" - "github.com/openyurtio/openyurt/pkg/util/kubeadmapi" "github.com/openyurtio/openyurt/pkg/yurtctl/constants" + kubeadmapi "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/apis/kubeadm" + kubeadmconstants "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/constants" + nodetoken "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/phases/bootstraptoken/node" + "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/apiclient" + kubeconfigutil "github.com/openyurtio/openyurt/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig" "github.com/openyurtio/openyurt/pkg/yurtctl/util" "github.com/openyurtio/openyurt/pkg/yurtctl/util/edgenode" strutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/strings" @@ -93,6 +100,8 @@ var ( "1.19", "1.19+", "1.20", "1.20+", "1.21", "1.21+"} + + ErrClusterVersionEmpty = errors.New("Cluster version should not be empty") ) func processCreateErr(kind string, name string, err error) error { @@ -655,11 +664,11 @@ func GetOrCreateJoinTokenString(cliSet *kubernetes.Clientset) (string, error) { } klog.V(1).Infoln("[token] creating token") - if err := kubeadmapi.CreateNewTokens(cliSet, + if err := nodetoken.CreateNewTokens(cliSet, []kubeadmapi.BootstrapToken{{ Token: token, - Usages: kubeadmapi.DefaultTokenUsages, - Groups: kubeadmapi.DefaultTokenGroups, + Usages: kubeadmconstants.DefaultTokenUsages, + Groups: kubeadmconstants.DefaultTokenGroups, }}); err != nil { return "", err } @@ -682,7 +691,7 @@ func usagesAndGroupsAreValid(token 
*kubeadmapi.BootstrapToken) bool { return true } - return sliceEqual(token.Usages, kubeadmapi.DefaultTokenUsages) && sliceEqual(token.Groups, kubeadmapi.DefaultTokenGroups) + return sliceEqual(token.Usages, kubeadmconstants.DefaultTokenUsages) && sliceEqual(token.Groups, kubeadmconstants.DefaultTokenGroups) } // find kube-controller-manager deployed through static file @@ -703,7 +712,14 @@ func GetKubeControllerManagerHANodes(cliSet *kubernetes.Clientset) ([]string, er //CheckAndInstallKubelet install kubelet and kubernetes-cni, skip install if they exist. func CheckAndInstallKubelet(clusterVersion string) error { - klog.Info("Check and install kubelet.") + if strings.Contains(clusterVersion, "-") { + clusterVersion = strings.Split(clusterVersion, "-")[0] + } + + klog.Infof("Check and install kubelet %s", clusterVersion) + if clusterVersion == "" { + return ErrClusterVersionEmpty + } kubeletExist := false if _, err := exec.LookPath("kubelet"); err == nil { if b, err := exec.Command("kubelet", "--version").CombinedOutput(); err == nil { @@ -737,11 +753,6 @@ func CheckAndInstallKubelet(clusterVersion string) error { } } } - if _, err := os.Stat(constants.StaticPodPath); os.IsNotExist(err) { - if err := os.MkdirAll(constants.StaticPodPath, 0755); err != nil { - return err - } - } if _, err := os.Stat(constants.KubeCniDir); err == nil { klog.Infof("Cni dir %s already exist, skip install.", constants.KubeCniDir) @@ -786,9 +797,9 @@ func SetKubeletService() error { return nil } -//SetKubeletUnitConfig configure kubelet startup parameters. -func SetKubeletUnitConfig(nodeType string) error { - kubeletUnitDir := filepath.Dir(edgenode.KubeletSvcPath) +// SetKubeletUnitConfig configures kubelet startup parameters. +func SetKubeletUnitConfig() error { + kubeletUnitDir := filepath.Dir(constants.KubeletServiceConfPath) if _, err := os.Stat(kubeletUnitDir); err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(kubeletUnitDir, os.ModePerm); err != nil { @@ -800,15 +811,91 @@ func SetKubeletUnitConfig(nodeType string) error { return err } } - if nodeType == constants.EdgeNode { - if err := ioutil.WriteFile(edgenode.KubeletSvcPath, []byte(constants.EdgeKubeletUnitConfig), 0600); err != nil { + + if err := ioutil.WriteFile(constants.KubeletServiceConfPath, []byte(constants.KubeletUnitConfig), 0600); err != nil { + return err + } + + return nil +} + +// SetKubeletConfigForNode writes kubelet.conf for the joining node. +func SetKubeletConfigForNode() error { + kubeconfigFilePath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName) + kubeletConfigDir := filepath.Dir(kubeconfigFilePath) + if _, err := os.Stat(kubeletConfigDir); err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(kubeletConfigDir, os.ModePerm); err != nil { + klog.Errorf("Failed to create dir %s: %v", kubeletConfigDir, err) + return err + } + } else { + klog.Errorf("Failed to stat dir %s: %v", kubeletConfigDir, err) return err } - } else { - if err := ioutil.WriteFile(edgenode.KubeletSvcPath, []byte(constants.CloudKubeletUnitConfig), 0600); err != nil { + } + if err := ioutil.WriteFile(kubeconfigFilePath, []byte(constants.KubeletConfForNode), 0755); err != nil { + return err + } + return nil +} + +// SetKubeletCaCert writes ca.crt for the joining node.
+func SetKubeletCaCert(config *clientcmdapi.Config) error { + kubeletCaCertPath := filepath.Join(kubeadmconstants.KubernetesDir, "pki", kubeadmconstants.CACertName) + kubeletCaCertDir := filepath.Dir(kubeletCaCertPath) + if _, err := os.Stat(kubeletCaCertDir); err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(kubeletCaCertDir, os.ModePerm); err != nil { + klog.Errorf("Failed to create dir %s: %v", kubeletCaCertDir, err) + return err + } + } else { + klog.Errorf("Failed to stat dir %s: %v", kubeletCaCertDir, err) + return err + } + } + clusterinfo := kubeconfigutil.GetClusterFromKubeConfig(config) + if err := ioutil.WriteFile(kubeletCaCertPath, []byte(clusterinfo.CertificateAuthorityData), 0755); err != nil { + return err + } + return nil +} + +// GetKubernetesVersionFromCluster gets the Kubernetes cluster version from the master. +func GetKubernetesVersionFromCluster(client kubernetes.Interface) (string, error) { + var kubernetesVersion string + // Also, the config map really should be KubeadmConfigConfigMap... + configMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, kubeadmconstants.KubeadmConfigConfigMap) + if err != nil { + return kubernetesVersion, pkgerrors.Wrap(err, "failed to get config map") + } + + // gets ClusterConfiguration from kubeadm-config + clusterConfigurationData, ok := configMap.Data[kubeadmconstants.ClusterConfigurationConfigMapKey] + if !ok { + return kubernetesVersion, pkgerrors.Errorf("unexpected error when reading kubeadm-config ConfigMap: %s key value pair missing", kubeadmconstants.ClusterConfigurationConfigMapKey) + }
+ + scanner := bufio.NewScanner(strings.NewReader(clusterConfigurationData)) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Split(line, ":") + if len(parts) != 2 { + continue + } + + if strings.Contains(parts[0], "kubernetesVersion") { + kubernetesVersion = strings.TrimSpace(parts[1]) + break + } + } + + if len(kubernetesVersion) == 0 { + return kubernetesVersion, errors.New("failed to get Kubernetes version") + } + + klog.Infof("kubernetes version: %s", kubernetesVersion) + return kubernetesVersion, nil +}
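
Illustrative note (not part of the patch): GetKubernetesVersionFromCluster added above reads the server-side version from the kubeadm-config ConfigMap, and the reworked CheckAndInstallKubelet installs a kubelet matching that version. A minimal sketch of how the two helpers chain together during join, assuming an admin kubeconfig at /etc/kubernetes/admin.conf; the standalone main, the kubeconfig path, and the kubeutil alias are assumptions for this sketch, not code from this PR (the real wiring lives in the new join phases).

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"

	kubeutil "github.com/openyurtio/openyurt/pkg/yurtctl/util/kubernetes"
)

func main() {
	// Build a clientset from an admin kubeconfig (path is an assumption for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/admin.conf")
	if err != nil {
		klog.Fatalf("failed to load kubeconfig: %v", err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("failed to create clientset: %v", err)
	}

	// Read the cluster's Kubernetes version from the kubeadm-config ConfigMap.
	version, err := kubeutil.GetKubernetesVersionFromCluster(client)
	if err != nil {
		klog.Fatalf("failed to get cluster version: %v", err)
	}

	// Install a kubelet of the same version; this is a no-op if a matching kubelet already exists.
	if err := kubeutil.CheckAndInstallKubelet(version); err != nil {
		klog.Fatalf("failed to install kubelet: %v", err)
	}
}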