From 34e7fec94a6a75671298e56243234f5db4026aff Mon Sep 17 00:00:00 2001 From: Johannes Scheerer Date: Thu, 14 Apr 2022 15:13:13 +0200 Subject: [PATCH] Use TokenRequest API instead of calico-node's service account token for CNI kubeconfig. With projected service account tokens in Kubernetes, the service account tokens of pods are bound to the lifetime of the corresponding pod. Therefore, it may lead to problems if an external process re-uses the token of a pod. The CNI binaries used the token of calico-node. However, if calico-node was stopped, the corresponding token lost its validity and hence could no longer be used for CNI operations. Usually, this automatically resolves over time, but there are some edge cases where this is not possible, e.g. if calico-node is autoscaled in terms of resources and the new resource requests would require preemption/eviction of an existing pod, the CNI operation to delete the network sandbox will fail because the token is no longer valid (as calico-node was stopped beforehand). This change switches over to using the TokenRequest API instead, i.e. creating new tokens with limited validity. It would have been good to bind the token to an object, e.g. to the corresponding node, but as of now only secrets and pods are supported as binding types for tokens. Hence, the tokens are only limited in time and not bound to any other Kubernetes object. 
--- app-policy/config/install/05-calico.yaml | 7 + .../calico/templates/calico-node-rbac.yaml | 8 + .../kubernetes/hardway/install-node.md | 8 + cni-plugin/Makefile | 4 +- cni-plugin/pkg/install/install.go | 12 +- cni-plugin/pkg/install/install_test.go | 80 ++++++ node/Makefile | 4 +- node/pkg/cni/token_watch.go | 251 +++++++++++++++--- node/pkg/cni/token_watch_suite_test.go | 22 ++ node/pkg/cni/token_watch_test.go | 94 +++++++ node/tests/k8st/infra/calico-kdd.yaml | 8 + 11 files changed, 450 insertions(+), 48 deletions(-) create mode 100644 node/pkg/cni/token_watch_suite_test.go create mode 100644 node/pkg/cni/token_watch_test.go diff --git a/app-policy/config/install/05-calico.yaml b/app-policy/config/install/05-calico.yaml index 9604987f11d..75f02ee0046 100644 --- a/app-policy/config/install/05-calico.yaml +++ b/app-policy/config/install/05-calico.yaml @@ -96,6 +96,13 @@ rules: - get - list - watch + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create --- apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/calico/_includes/charts/calico/templates/calico-node-rbac.yaml b/calico/_includes/charts/calico/templates/calico-node-rbac.yaml index 5d39ecd0f53..3aa1ecae5d0 100644 --- a/calico/_includes/charts/calico/templates/calico-node-rbac.yaml +++ b/calico/_includes/charts/calico/templates/calico-node-rbac.yaml @@ -70,6 +70,14 @@ rules: - pods/status verbs: - patch + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create # Calico monitors various CRDs for config. 
- apiGroups: ["crd.projectcalico.org"] resources: diff --git a/calico/getting-started/kubernetes/hardway/install-node.md b/calico/getting-started/kubernetes/hardway/install-node.md index d2dc657a8a1..6051d9128f1 100644 --- a/calico/getting-started/kubernetes/hardway/install-node.md +++ b/calico/getting-started/kubernetes/hardway/install-node.md @@ -121,6 +121,14 @@ rules: - pods/status verbs: - patch + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create # Calico monitors various CRDs for config. - apiGroups: ["crd.projectcalico.org"] resources: diff --git a/cni-plugin/Makefile b/cni-plugin/Makefile index 7fc7a63ee1e..8a4c9a0aedf 100644 --- a/cni-plugin/Makefile +++ b/cni-plugin/Makefile @@ -207,8 +207,8 @@ pkg/install/install.test: pkg/install/*.go .PHONY: test-install-cni ## Test the install -test-install-cni: image pkg/install/install.test - cd pkg/install && CONTAINER_NAME=$(CNI_PLUGIN_IMAGE):latest-$(ARCH) ./install.test +test-install-cni: run-k8s-apiserver image pkg/install/install.test + cd pkg/install && CONTAINER_NAME=$(CNI_PLUGIN_IMAGE):latest-$(ARCH) CERTS_PATH=$(CERTS_PATH) ./install.test ############################################################################### # CI/CD diff --git a/cni-plugin/pkg/install/install.go b/cni-plugin/pkg/install/install.go index a731adc9ec7..8a7908ff207 100644 --- a/cni-plugin/pkg/install/install.go +++ b/cni-plugin/pkg/install/install.go @@ -36,6 +36,7 @@ import ( "github.com/projectcalico/calico/libcalico-go/lib/logutils" "github.com/projectcalico/calico/libcalico-go/lib/names" + "github.com/projectcalico/calico/node/pkg/cni" ) type config struct { @@ -476,7 +477,16 @@ contexts: user: calico current-context: calico-context` - data = strings.Replace(data, "TOKEN", kubecfg.BearerToken, 1) + clientset, err := cni.BuildClientSet() + if err != nil { + logrus.WithError(err).Fatal("Unable to 
create client for generating CNI token") + } + tr := cni.NewTokenRefresher(clientset, cni.NamespaceOfUsedServiceAccount(), cni.DefaultServiceAccountName) + tu, err := tr.UpdateToken() + if err != nil { + logrus.WithError(err).Fatal("Unable to create token for CNI kubeconfig") + } + data = strings.Replace(data, "TOKEN", tu.Token, 1) data = strings.Replace(data, "__KUBERNETES_SERVICE_PROTOCOL__", getEnv("KUBERNETES_SERVICE_PROTOCOL", "https"), -1) data = strings.Replace(data, "__KUBERNETES_SERVICE_HOST__", getEnv("KUBERNETES_SERVICE_HOST", ""), -1) data = strings.Replace(data, "__KUBERNETES_SERVICE_PORT__", getEnv("KUBERNETES_SERVICE_PORT", ""), -1) diff --git a/cni-plugin/pkg/install/install_test.go b/cni-plugin/pkg/install/install_test.go index 495ac6d933f..be0b08458b5 100644 --- a/cni-plugin/pkg/install/install_test.go +++ b/cni-plugin/pkg/install/install_test.go @@ -2,6 +2,7 @@ package install import ( + "context" "fmt" "io/ioutil" "math/rand" @@ -13,6 +14,11 @@ import ( . "github.com/onsi/gomega" log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" ) var expectedDefaultConfig string = `{ @@ -79,13 +85,16 @@ func runCniContainer(tempDir string, binFolderWriteable bool, extraArgs ...strin } args := []string{ "run", "--rm", "--name", name, + "--net=host", "-e", "SLEEP=false", "-e", "KUBERNETES_SERVICE_HOST=127.0.0.1", "-e", "KUBERNETES_SERVICE_PORT=6443", "-e", "KUBERNETES_NODE_NAME=my-node", + "-e", "KUBECONFIG=/home/user/certs/kubeconfig", "-v", tempDir + "/bin:" + binFolder, "-v", tempDir + "/net.d:/host/etc/cni/net.d", "-v", tempDir + "/serviceaccount:/var/run/secrets/kubernetes.io/serviceaccount", + "-v", os.Getenv("CERTS_PATH") + ":/home/user/certs", } args = append(args, extraArgs...) 
image := os.Getenv("CONTAINER_NAME") @@ -99,6 +108,54 @@ func runCniContainer(tempDir string, binFolderWriteable bool, extraArgs ...strin return err } +func createKubernetesClient() *kubernetes.Clientset { + certsPath := os.Getenv("CERTS_PATH") + if len(certsPath) == 0 { + Fail("CERTS_PATH env variable not set") + } + kubeconfigPath := certsPath + "/kubeconfig" + kubeconfigData, err := ioutil.ReadFile(kubeconfigPath) + if err != nil { + Fail(fmt.Sprintf("Failed to read kubeconfig file: %v", err)) + } + // The client certificate/key do not necessarily reside in the location specified by kubeconfig => patch it directly + config, err := clientcmd.Load(kubeconfigData) + if err != nil { + Fail(fmt.Sprintf("Failed to load kubeconfig: %v", err)) + } + certificate, err := ioutil.ReadFile(certsPath + "/admin.pem") + if err != nil { + Fail(fmt.Sprintf("Failed to read admin client certificate: %v", err)) + } + key, err := ioutil.ReadFile(certsPath + "/admin-key.pem") + if err != nil { + Fail(fmt.Sprintf("Failed to read admin client key: %v", err)) + } + + overrides := &clientcmd.ConfigOverrides{ + AuthInfo: api.AuthInfo{ + ClientCertificate: "", + ClientCertificateData: certificate, + ClientKey: "", + ClientKeyData: key, + }, + } + adminAuthInfo := config.AuthInfos["admin"] + adminAuthInfo.ClientCertificate = "" + adminAuthInfo.ClientCertificateData = certificate + adminAuthInfo.ClientKey = "" + adminAuthInfo.ClientKeyData = key + kubeconfig, err := clientcmd.NewDefaultClientConfig(*config, overrides).ClientConfig() + if err != nil { + Fail(fmt.Sprintf("Failed to create kubeconfig: %v", err)) + } + clientset, err := kubernetes.NewForConfig(kubeconfig) + if err != nil { + Fail(fmt.Sprintf("Could not create kubernetes client: %v", err)) + } + return clientset +} + var _ = Describe("CNI installation tests", func() { var tempDir string BeforeEach(func() { @@ -158,6 +215,29 @@ PuB/TL+u2y+iQUyXxLy3 if err != nil { Fail(fmt.Sprintf("Failed to write k8s CA file for test: %v", err)) } 
+ + // Create namespace file for token refresh + k8sNamespace := []byte("kube-system") + var namespaceFile = fmt.Sprintf("%s/serviceaccount/namespace", tempDir) + err = ioutil.WriteFile(namespaceFile, k8sNamespace, 0755) + if err != nil { + Fail(fmt.Sprintf("Failed to write k8s namespace file: %v", err)) + } + + // Create calico-node service account + serviceAccount := &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "calico-node", + }, + } + _, err = createKubernetesClient().CoreV1().ServiceAccounts("kube-system").Create(context.Background(), serviceAccount, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + // Cleanup calico-node service account + err := createKubernetesClient().CoreV1().ServiceAccounts("kube-system").Delete(context.Background(), "calico-node", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) }) Context("Install with default values", func() { diff --git a/node/Makefile b/node/Makefile index 6b5b678197b..3d422c6e3ad 100644 --- a/node/Makefile +++ b/node/Makefile @@ -292,10 +292,10 @@ fv: run-k8s-apiserver -v $(CERTS_PATH):/home/user/certs \ -e KUBECONFIG=/go/src/github.com/projectcalico/calico/hack/test/certs/kubeconfig \ -e ETCD_ENDPOINTS=http://$(LOCAL_IP_ENV):2379 \ - $(CALICO_BUILD) ginkgo -cover -r -skipPackage vendor pkg/lifecycle/startup pkg/allocateip $(GINKGO_ARGS) + $(CALICO_BUILD) ginkgo -cover -r -skipPackage vendor pkg/lifecycle/startup pkg/allocateip pkg/cni $(GINKGO_ARGS) # Skip packages containing FV tests. 
-UT_PACKAGES_TO_SKIP?=pkg/lifecycle/startup,pkg/allocateip,pkg/status +UT_PACKAGES_TO_SKIP?=pkg/lifecycle/startup,pkg/allocateip,pkg/status,pkg/cni .PHONY: ut ut: $(DOCKER_GO_BUILD) sh -c '$(GIT_CONFIG_SSH) ginkgo -r -skipPackage=$(UT_PACKAGES_TO_SKIP) $(GINKGO_ARGS)' diff --git a/node/pkg/cni/token_watch.go b/node/pkg/cni/token_watch.go index d78db589c36..da784876d7b 100644 --- a/node/pkg/cni/token_watch.go +++ b/node/pkg/cni/token_watch.go @@ -1,77 +1,242 @@ package cni import ( + "context" "encoding/base64" + "encoding/json" "fmt" "io/ioutil" + "math/rand" "os" + "strings" + "sync" "time" "github.com/sirupsen/logrus" - "gopkg.in/fsnotify/fsnotify.v1" + authenticationv1 "k8s.io/api/authentication/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" ) -var serviceaccountDirectory string = "/var/run/secrets/kubernetes.io/serviceaccount/" +const DefaultServiceAccountName = "calico-node" +const serviceAccountNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" +const tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" +const defaultCNITokenValiditySeconds = 24 * 60 * 60 +const minTokenRetryDuration = 5 * time.Second +const defaultRefreshFraction = 4 + var kubeconfigPath string = "/host/etc/cni/net.d/calico-kubeconfig" -func Run() { - // Log to stdout. this prevents our logs from being interpreted as errors by, for example, - // fluentd's default configuration. - logrus.SetOutput(os.Stdout) +type TokenRefresher struct { + tokenSupported bool + tokenOnce *sync.Once + + tokenValiditySeconds int64 + minTokenRetryDuration time.Duration + defaultRefreshFraction time.Duration + + clientset *kubernetes.Clientset - // Create a watcher for file changes. 
- watcher, err := fsnotify.NewWatcher() + namespace string + serviceAccountName string + + tokenChan chan TokenUpdate + stopChan chan struct{} +} + +type TokenUpdate struct { + Token string + ExpirationTime time.Time +} + +func NamespaceOfUsedServiceAccount() string { + namespace, err := ioutil.ReadFile(serviceAccountNamespace) if err != nil { - panic(err) + logrus.WithError(err).Fatal("Failed to read service account namespace file") } - defer watcher.Close() + return string(namespace) +} - // Watch for changes to the serviceaccount directory. Rety if necessary. - for { - if err := watcher.Add(serviceaccountDirectory); err != nil { - // Error watching the file - retry - logrus.WithError(err).Error("Failed to watch Kubernetes serviceaccount files.") - time.Sleep(5 * time.Second) - continue - } +func BuildClientSet() (*kubernetes.Clientset, error) { + cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG")) + if err != nil { + return nil, err + } + return kubernetes.NewForConfig(cfg) +} + +func NewTokenRefresher(clientset *kubernetes.Clientset, namespace string, serviceAccountName string) *TokenRefresher { + return NewTokenRefresherWithCustomTiming(clientset, namespace, serviceAccountName, defaultCNITokenValiditySeconds, minTokenRetryDuration, defaultRefreshFraction) +} + +func NewTokenRefresherWithCustomTiming(clientset *kubernetes.Clientset, namespace string, serviceAccountName string, tokenValiditySeconds int64, minTokenRetryDuration time.Duration, defaultRefreshFraction time.Duration) *TokenRefresher { + return &TokenRefresher{ + tokenSupported: false, + tokenOnce: &sync.Once{}, + tokenValiditySeconds: tokenValiditySeconds, + minTokenRetryDuration: minTokenRetryDuration, + defaultRefreshFraction: defaultRefreshFraction, + clientset: clientset, + namespace: namespace, + serviceAccountName: serviceAccountName, + tokenChan: make(chan TokenUpdate), + stopChan: make(chan struct{}), + } +} + +func (t *TokenRefresher) UpdateToken() (TokenUpdate, error) { 
+ validity := t.tokenValiditySeconds + tr := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{}, + ExpirationSeconds: &validity, + }, + } - // Successfully watched the file. Break from the retry loop. - logrus.WithField("directory", serviceaccountDirectory).Info("Watching contents for changes.") - break + tokenRequest, err := t.clientset.CoreV1().ServiceAccounts(t.namespace).CreateToken(context.TODO(), t.serviceAccountName, tr, metav1.CreateOptions{}) + if apierrors.IsNotFound(err) && !t.tokenRequestSupported(t.clientset) { + logrus.WithError(err).Debug("Unable to create token for CNI kubeconfig as token request api is not supported, falling back to local service account token") + return tokenUpdateFromFile() } + if err != nil { + logrus.WithError(err).Error("Unable to create token for CNI kubeconfig") + return TokenUpdate{}, err + } + + return TokenUpdate{ + Token: tokenRequest.Status.Token, + ExpirationTime: tokenRequest.Status.ExpirationTimestamp.Time, + }, nil +} + +func (t *TokenRefresher) TokenChan() <-chan TokenUpdate { + return t.tokenChan +} + +func (t *TokenRefresher) Stop() { + close(t.stopChan) +} - // Handle events from the watcher. +func (t *TokenRefresher) Run() { + var nextExpiration time.Time + rand := rand.New(rand.NewSource(time.Now().UnixNano())) for { - // To prevent tight looping, add a sleep here. 
- time.Sleep(1 * time.Second) + tu, err := t.UpdateToken() + if err != nil { + logrus.WithError(err).Error("Failed to update CNI token, retrying...") + // Reset nextExpiration to retry directly + nextExpiration = time.Time{} + } else { + nextExpiration = tu.ExpirationTime + select { + case t.tokenChan <- tu: + case <-t.stopChan: + return + } + } + now := time.Now() + var sleepTime time.Duration + // Do some basic rate limiting to prevent flooding the kube apiserver with requests + if nextExpiration.Before(now.Add(t.minTokenRetryDuration * t.defaultRefreshFraction)) { + sleepTime = t.minTokenRetryDuration + } else { + sleepTime = nextExpiration.Sub(now) / t.defaultRefreshFraction + } + jitter := rand.Float32() * float32(sleepTime) + sleepTime += time.Duration(jitter) + if logrus.IsLevelEnabled(logrus.DebugLevel) { + logrus.Debugf("Going to sleep for %s", sleepTime.String()) + } select { - case event := <-watcher.Events: - // We've received a notification that the Kubernetes secrets files have changed. - // Update the kubeconfig file on disk to match. 
- logrus.WithField("event", event).Info("Notified of change to serviceaccount files.") - cfg, err := rest.InClusterConfig() - if err != nil { - logrus.WithError(err).Error("Error generating kube config.") - continue - } - err = rest.LoadTLSFiles(cfg) - if err != nil { - logrus.WithError(err).Error("Error loading TLS files.") - continue + case <-time.After(sleepTime): + case <-t.stopChan: + return + } + } +} + +func (t *TokenRefresher) tokenRequestSupported(clientset *kubernetes.Clientset) bool { + t.tokenOnce.Do(func() { + resources, err := clientset.Discovery().ServerResourcesForGroupVersion("v1") + if err != nil { + return + } + for _, resource := range resources.APIResources { + if resource.Name == "serviceaccounts/token" { + t.tokenSupported = true + return } - writeKubeconfig(cfg) + } + }) + return t.tokenSupported +} + +func tokenUpdateFromFile() (TokenUpdate, error) { + tokenBytes, err := ioutil.ReadFile(tokenFile) + if err != nil { + logrus.WithError(err).Error("Failed to read service account token file") + return TokenUpdate{}, err + } + token := string(tokenBytes) + tokenSegments := strings.Split(token, ".") + if len(tokenSegments) != 3 { + err := fmt.Errorf("invalid token segment size: %d", len(tokenSegments)) + logrus.WithError(err).Error("Failed parsing service account token") + return TokenUpdate{}, err + } + unparsedClaims := tokenSegments[1] + // Padding may be missing, hence check and append it + if l := len(unparsedClaims) % 4; l > 0 { + unparsedClaims += strings.Repeat("=", 4-l) + } + decodedClaims, err := base64.URLEncoding.DecodeString(unparsedClaims) + if err != nil { + logrus.WithError(err).Error("Failed to decode service account token claims") + return TokenUpdate{}, err + } + var claimMap map[string]interface{} + err = json.Unmarshal(decodedClaims, &claimMap) + if err != nil { + logrus.WithError(err).Error("Failed to unmarshal service account token claims") + return TokenUpdate{}, err + } + return TokenUpdate{ + Token: token, + 
ExpirationTime: time.Unix(int64(claimMap["exp"].(float64)), 0), + }, nil +} - case err := <-watcher.Errors: - // We've received an error - log it out but don't exit. - logrus.WithError(err).Error("Error watching serviceaccount files.") +func Run() { + clientset, err := BuildClientSet() + if err != nil { + logrus.WithError(err).Fatal("Failed to create in cluster client set") + } + tr := NewTokenRefresher(clientset, NamespaceOfUsedServiceAccount(), DefaultServiceAccountName) + tokenChan := tr.TokenChan() + go tr.Run() + + for tu := range tokenChan { + logrus.Info("Update of CNI kubeconfig triggered based on elapsed time.") + cfg, err := rest.InClusterConfig() + if err != nil { + logrus.WithError(err).Error("Error generating kube config.") + continue + } + err = rest.LoadTLSFiles(cfg) + if err != nil { + logrus.WithError(err).Error("Error loading TLS files.") + continue } + writeKubeconfig(cfg, tu.Token) } } // writeKubeconfig writes an updated kubeconfig file to disk that the CNI plugin can use to access the Kubernetes API. -func writeKubeconfig(cfg *rest.Config) { +func writeKubeconfig(cfg *rest.Config, token string) { template := `# Kubeconfig file for Calico CNI plugin. Installed by calico/node. apiVersion: v1 kind: Config @@ -92,7 +257,7 @@ contexts: current-context: calico-context` // Replace the placeholders. - data := fmt.Sprintf(template, cfg.Host, base64.StdEncoding.EncodeToString(cfg.CAData), cfg.BearerToken) + data := fmt.Sprintf(template, cfg.Host, base64.StdEncoding.EncodeToString(cfg.CAData), token) // Write the filled out config to disk. if err := ioutil.WriteFile(kubeconfigPath, []byte(data), 0600); err != nil { diff --git a/node/pkg/cni/token_watch_suite_test.go b/node/pkg/cni/token_watch_suite_test.go new file mode 100644 index 00000000000..1f0a4080ccd --- /dev/null +++ b/node/pkg/cni/token_watch_suite_test.go @@ -0,0 +1,22 @@ +package cni_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "testing" + + "github.com/onsi/ginkgo/reporters" + + "github.com/projectcalico/calico/libcalico-go/lib/testutils" +) + +func init() { + testutils.HookLogrusForGinkgo() +} + +func TestCommands(t *testing.T) { + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("../../report/cnitokenwatch_suite.xml") + RunSpecsWithDefaultAndCustomReporters(t, "CNITokenWatch Suite", []Reporter{junitReporter}) +} diff --git a/node/pkg/cni/token_watch_test.go b/node/pkg/cni/token_watch_test.go new file mode 100644 index 00000000000..57ccd5225ae --- /dev/null +++ b/node/pkg/cni/token_watch_test.go @@ -0,0 +1,94 @@ +package cni_test + +import ( + "context" + "fmt" + "os" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + "github.com/projectcalico/calico/node/pkg/cni" +) + +var _ = Describe("FV tests", func() { + const serviceAccountName = "cni-token-watch-test" + const namespace = metav1.NamespaceSystem + + var clientset *kubernetes.Clientset + + BeforeEach(func() { + clientset = createKubernetesClient() + setupServiceAccount(clientset, serviceAccountName, namespace) + }) + + AfterEach(func() { + cleanupServiceAccount(clientset, serviceAccountName, namespace) + }) + + It("should create a token successfully", func() { + tr := cni.NewTokenRefresher(clientset, namespace, serviceAccountName) + tu, err := tr.UpdateToken() + Expect(err).ShouldNot(HaveOccurred()) + Expect(tu.Token).NotTo(BeEmpty()) + }) + + It("should create a token successfully and deliver it through the channel", func() { + tr := cni.NewTokenRefresher(clientset, namespace, serviceAccountName) + tokenChan := tr.TokenChan() + go tr.Run() + defer tr.Stop() + var receivedToken cni.TokenUpdate + Eventually(tokenChan, 5*time.Second).Should(Receive(&receivedToken)) + Expect(receivedToken.Token).NotTo(BeEmpty()) + }) + + 
It("should create multiple tokens successfully and deliver them through the channel", func() { + const iterations = 5 + // kube-apiserver does not allow smaller validity periods than 10 minutes + const tokenValiditySeconds = 600 + + tr := cni.NewTokenRefresherWithCustomTiming(clientset, namespace, serviceAccountName, tokenValiditySeconds, 1*time.Nanosecond, 600000) + tokenChan := tr.TokenChan() + go tr.Run() + defer tr.Stop() + for i := 0; i < iterations; i++ { + var receivedToken cni.TokenUpdate + Eventually(tokenChan, 5*time.Second).Should(Receive(&receivedToken)) + Expect(receivedToken.Token).NotTo(BeEmpty()) + } + }) +}) + +func createKubernetesClient() *kubernetes.Clientset { + kubeconfigPath := os.Getenv("KUBECONFIG") + kubeconfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + Fail(fmt.Sprintf("Failed to create kubernetes config: %v", err)) + } + clientset, err := kubernetes.NewForConfig(kubeconfig) + if err != nil { + Fail(fmt.Sprintf("Could not create kubernetes client: %v", err)) + } + return clientset +} + +func setupServiceAccount(clientset *kubernetes.Clientset, name string, namespace string) { + serviceAccount := &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _, err := clientset.CoreV1().ServiceAccounts(namespace).Create(context.Background(), serviceAccount, metav1.CreateOptions{}) + Expect(err).ShouldNot(HaveOccurred()) +} + +func cleanupServiceAccount(clientset *kubernetes.Clientset, name string, namespace string) { + err := clientset.CoreV1().ServiceAccounts(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) + Expect(err).ShouldNot(HaveOccurred()) +} diff --git a/node/tests/k8st/infra/calico-kdd.yaml b/node/tests/k8st/infra/calico-kdd.yaml index b45695dcc76..e50c4577a48 100644 --- a/node/tests/k8st/infra/calico-kdd.yaml +++ b/node/tests/k8st/infra/calico-kdd.yaml @@ -230,6 +230,14 @@ rules: - pods/status verbs: - patch + # Used for creating service account tokens to be 
used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create # Calico monitors various CRDs for config. - apiGroups: ["crd.projectcalico.org"] resources: