diff --git a/docs/Manual/Deployment/Kubernetes/DeploymentResource.md b/docs/Manual/Deployment/Kubernetes/DeploymentResource.md index 27e1383ee..06a271c9e 100644 --- a/docs/Manual/Deployment/Kubernetes/DeploymentResource.md +++ b/docs/Manual/Deployment/Kubernetes/DeploymentResource.md @@ -335,6 +335,14 @@ The default value is `8Gi`. This setting is not available for group `coordinators`, `syncmasters` & `syncworkers` because servers in these groups do not need persistent storage. +### `spec..serviceAccountName: string` + +This setting specifies the `serviceAccountName` for the `Pods` created +for each server of this group. + +An alternative `ServiceAccount` is typically used to separate access rights. +The ArangoDB deployments do not require any special rights. + ### `spec..storageClassName: string` This setting specifies the `storageClass` for the `PersistentVolume`s created diff --git a/manifests/templates/test/rbac.yaml b/manifests/templates/test/rbac.yaml index a54c17d9a..16848c872 100644 --- a/manifests/templates/test/rbac.yaml +++ b/manifests/templates/test/rbac.yaml @@ -10,7 +10,7 @@ rules: resources: ["nodes"] verbs: ["list"] - apiGroups: [""] - resources: ["pods", "services", "persistentvolumes", "persistentvolumeclaims", "secrets"] + resources: ["pods", "services", "persistentvolumes", "persistentvolumeclaims", "secrets", "serviceaccounts"] verbs: ["*"] - apiGroups: ["apps"] resources: ["daemonsets"] diff --git a/pkg/apis/deployment/v1alpha/server_group_spec.go b/pkg/apis/deployment/v1alpha/server_group_spec.go index df267baee..d8393c77f 100644 --- a/pkg/apis/deployment/v1alpha/server_group_spec.go +++ b/pkg/apis/deployment/v1alpha/server_group_spec.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/arangodb/kube-arangodb/pkg/util" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" ) // ServerGroupSpec contains the specification for all servers in a specific group (e.g. 
all agents) @@ -42,6 +43,8 @@ type ServerGroupSpec struct { Resources v1.ResourceRequirements `json:"resources,omitempty"` // Tolerations specifies the tolerations added to Pods in this group. Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // ServiceAccountName specifies the name of the service account used for Pods in this group. + ServiceAccountName *string `json:"serviceAccountName,omitempty"` } // GetCount returns the value of count. @@ -64,6 +67,11 @@ func (s ServerGroupSpec) GetTolerations() []v1.Toleration { return s.Tolerations } +// GetServiceAccountName returns the value of serviceAccountName. +func (s ServerGroupSpec) GetServiceAccountName() string { + return util.StringOrDefault(s.ServiceAccountName) +} + // Validate the given group spec func (s ServerGroupSpec) Validate(group ServerGroup, used bool, mode DeploymentMode, env Environment) error { if used { @@ -86,6 +94,16 @@ func (s ServerGroupSpec) Validate(group ServerGroup, used bool, mode DeploymentM if s.GetCount() > 1 && group == ServerGroupSingle && mode == DeploymentModeSingle { return maskAny(errors.Wrapf(ValidationError, "Invalid count value %d. Expected 1", s.GetCount())) } + if name := s.GetServiceAccountName(); name != "" { + if err := k8sutil.ValidateOptionalResourceName(name); err != nil { + return maskAny(errors.Wrapf(ValidationError, "Invalid serviceAccountName: %s", err)) + } + } + if name := s.GetStorageClassName(); name != "" { + if err := k8sutil.ValidateOptionalResourceName(name); err != nil { + return maskAny(errors.Wrapf(ValidationError, "Invalid storageClassName: %s", err)) + } + } } else if s.GetCount() != 0 { return maskAny(errors.Wrapf(ValidationError, "Invalid count value %d for un-used group. 
Expected 0", s.GetCount())) } @@ -145,6 +163,9 @@ func (s *ServerGroupSpec) SetDefaultsFrom(source ServerGroupSpec) { if s.Tolerations == nil { s.Tolerations = source.Tolerations } + if s.ServiceAccountName == nil { + s.ServiceAccountName = util.NewStringOrNil(source.ServiceAccountName) + } setDefaultsFromResourceList(&s.Resources.Limits, source.Resources.Limits) setDefaultsFromResourceList(&s.Resources.Requests, source.Resources.Requests) } diff --git a/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go b/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go index e0f332710..aba704632 100644 --- a/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go +++ b/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go @@ -597,6 +597,15 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ServiceAccountName != nil { + in, out := &in.ServiceAccountName, &out.ServiceAccountName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } diff --git a/pkg/deployment/images.go b/pkg/deployment/images.go index c5cd69c17..b29108752 100644 --- a/pkg/deployment/images.go +++ b/pkg/deployment/images.go @@ -176,8 +176,10 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, ima tolerations = k8sutil.AddTolerationIfNotFound(tolerations, k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeNotReady, shortDur)) tolerations = k8sutil.AddTolerationIfNotFound(tolerations, k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeUnreachable, shortDur)) tolerations = k8sutil.AddTolerationIfNotFound(tolerations, k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeAlphaUnreachable, shortDur)) + serviceAccountName := "" - if err := k8sutil.CreateArangodPod(ib.KubeCli, true, ib.APIObject, role, id, podName, "", image, "", ib.Spec.GetImagePullPolicy(), "", false, terminationGracePeriod, args, nil, nil, nil, nil, tolerations, "", ""); err != nil { + if err := 
k8sutil.CreateArangodPod(ib.KubeCli, true, ib.APIObject, role, id, podName, "", image, "", ib.Spec.GetImagePullPolicy(), "", false, terminationGracePeriod, args, nil, nil, nil, nil, + tolerations, serviceAccountName, "", ""); err != nil { log.Debug().Err(err).Msg("Failed to create image ID pod") return true, maskAny(err) } diff --git a/pkg/deployment/reconcile/plan_builder.go b/pkg/deployment/reconcile/plan_builder.go index 6026a639c..ca931981e 100644 --- a/pkg/deployment/reconcile/plan_builder.go +++ b/pkg/deployment/reconcile/plan_builder.go @@ -266,6 +266,8 @@ func podNeedsUpgrading(p v1.Pod, spec api.DeploymentSpec, images api.ImageInfoLi // When true is returned, a reason for the rotation is already returned. func podNeedsRotation(p v1.Pod, apiObject metav1.Object, spec api.DeploymentSpec, group api.ServerGroup, agents api.MemberStatusList, id string) (bool, string) { + groupSpec := spec.GetServerGroupSpec(group) + // Check image pull policy if c, found := k8sutil.GetContainerByName(&p, k8sutil.ServerContainerName); found { if c.ImagePullPolicy != spec.GetImagePullPolicy() { @@ -274,6 +276,7 @@ func podNeedsRotation(p v1.Pod, apiObject metav1.Object, spec api.DeploymentSpec } else { return true, "Server container not found" } + // Check arguments /*expectedArgs := createArangodArgs(apiObject, spec, group, agents, id) if len(expectedArgs) != len(c.Args) { @@ -285,6 +288,11 @@ func podNeedsRotation(p v1.Pod, apiObject metav1.Object, spec api.DeploymentSpec } }*/ + // Check service account + if p.Spec.ServiceAccountName != groupSpec.GetServiceAccountName() { + return true, "ServiceAccountName changed" + } + return false, "" } diff --git a/pkg/deployment/resources/pod_creator.go b/pkg/deployment/resources/pod_creator.go index 0241048fa..ff0c7b77e 100644 --- a/pkg/deployment/resources/pod_creator.go +++ b/pkg/deployment/resources/pod_creator.go @@ -438,6 +438,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, group api.Server lifecycleImage := 
r.context.GetLifecycleImage() terminationGracePeriod := group.DefaultTerminationGracePeriod() tolerations := r.createPodTolerations(group, groupSpec) + serviceAccountName := groupSpec.GetServiceAccountName() // Update pod name role := group.AsRole() @@ -500,7 +501,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, group api.Server requireUUID := group == api.ServerGroupDBServers && m.IsInitialized finalizers := r.createPodFinalizers(group) if err := k8sutil.CreateArangodPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, m.PersistentVolumeClaimName, imageInfo.ImageID, lifecycleImage, spec.GetImagePullPolicy(), - engine, requireUUID, terminationGracePeriod, args, env, finalizers, livenessProbe, readinessProbe, tolerations, tlsKeyfileSecretName, rocksdbEncryptionSecretName); err != nil { + engine, requireUUID, terminationGracePeriod, args, env, finalizers, livenessProbe, readinessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, rocksdbEncryptionSecretName); err != nil { return maskAny(err) } log.Debug().Str("pod-name", m.PodName).Msg("Created pod") @@ -571,7 +572,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, group api.Server affinityWithRole = api.ServerGroupDBServers.AsRole() } if err := k8sutil.CreateArangoSyncPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, imageInfo.ImageID, lifecycleImage, spec.GetImagePullPolicy(), terminationGracePeriod, args, env, - livenessProbe, tolerations, tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName, affinityWithRole); err != nil { + livenessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName, affinityWithRole); err != nil { return maskAny(err) } log.Debug().Str("pod-name", m.PodName).Msg("Created pod") diff --git a/pkg/util/k8sutil/pods.go b/pkg/util/k8sutil/pods.go index 80f6c4a78..d651bdca2 100644 --- a/pkg/util/k8sutil/pods.go 
+++ b/pkg/util/k8sutil/pods.go @@ -384,7 +384,7 @@ func initLifecycleContainer(image string) (v1.Container, error) { } // newPod creates a basic Pod for given settings. -func newPod(deploymentName, ns, role, id, podName string, finalizers []string, tolerations []v1.Toleration) v1.Pod { +func newPod(deploymentName, ns, role, id, podName string, finalizers []string, tolerations []v1.Toleration, serviceAccountName string) v1.Pod { hostname := CreatePodHostName(deploymentName, role, id) p := v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -393,10 +393,11 @@ func newPod(deploymentName, ns, role, id, podName string, finalizers []string, t Finalizers: finalizers, }, Spec: v1.PodSpec{ - Hostname: hostname, - Subdomain: CreateHeadlessServiceName(deploymentName), - RestartPolicy: v1.RestartPolicyNever, - Tolerations: tolerations, + Hostname: hostname, + Subdomain: CreateHeadlessServiceName(deploymentName), + RestartPolicy: v1.RestartPolicyNever, + Tolerations: tolerations, + ServiceAccountName: serviceAccountName, }, } return p @@ -409,10 +410,10 @@ func CreateArangodPod(kubecli kubernetes.Interface, developmentMode bool, deploy role, id, podName, pvcName, image, lifecycleImage string, imagePullPolicy v1.PullPolicy, engine string, requireUUID bool, terminationGracePeriod time.Duration, args []string, env map[string]EnvValue, finalizers []string, - livenessProbe *HTTPProbeConfig, readinessProbe *HTTPProbeConfig, tolerations []v1.Toleration, + livenessProbe *HTTPProbeConfig, readinessProbe *HTTPProbeConfig, tolerations []v1.Toleration, serviceAccountName string, tlsKeyfileSecretName, rocksdbEncryptionSecretName string) error { // Prepare basic pod - p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, finalizers, tolerations) + p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, finalizers, tolerations, serviceAccountName) terminationGracePeriodSeconds := int64(math.Ceil(terminationGracePeriod.Seconds())) 
p.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds @@ -510,10 +511,10 @@ func CreateArangodPod(kubecli kubernetes.Interface, developmentMode bool, deploy // If the pod already exists, nil is returned. // If another error occurs, that error is returned. func CreateArangoSyncPod(kubecli kubernetes.Interface, developmentMode bool, deployment APIObject, role, id, podName, image, lifecycleImage string, imagePullPolicy v1.PullPolicy, - terminationGracePeriod time.Duration, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig, tolerations []v1.Toleration, + terminationGracePeriod time.Duration, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig, tolerations []v1.Toleration, serviceAccountName string, tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName, affinityWithRole string) error { // Prepare basic pod - p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, nil, tolerations) + p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, nil, tolerations, serviceAccountName) terminationGracePeriodSeconds := int64(math.Ceil(terminationGracePeriod.Seconds())) p.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds diff --git a/tests/service_account_test.go b/tests/service_account_test.go new file mode 100644 index 000000000..02e608252 --- /dev/null +++ b/tests/service_account_test.go @@ -0,0 +1,304 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package tests + +import ( + "context" + "strings" + "testing" + + "github.com/dchest/uniuri" + "github.com/stretchr/testify/assert" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + driver "github.com/arangodb/go-driver" + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha" + "github.com/arangodb/kube-arangodb/pkg/client" + "github.com/arangodb/kube-arangodb/pkg/util" +) + +// TestServiceAccountSingle tests the creating of a single server deployment +// with default settings using a custom service account. 
+func TestServiceAccountSingle(t *testing.T) { + longOrSkip(t) + c := client.MustNewInCluster() + kubecli := mustNewKubeClient(t) + ns := getNamespace(t) + + // Prepare service account + namePrefix := "test-sa-sng-" + saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) + defer deleteServiceAccount(kubecli, saName, ns) + + // Prepare deployment config + depl := newDeployment(namePrefix + uniuri.NewLen(4)) + depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) + depl.Spec.Single.ServiceAccountName = util.NewString(saName) + + // Create deployment + _, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl) + if err != nil { + t.Fatalf("Create deployment failed: %v", err) + } + // Prepare cleanup + defer removeDeployment(c, depl.GetName(), ns) + + // Wait for deployment to be ready + apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) + if err != nil { + t.Fatalf("Deployment not running in time: %v", err) + } + + // Create a database client + ctx := context.Background() + client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t) + + // Wait for single server available + if err := waitUntilVersionUp(client, nil); err != nil { + t.Fatalf("Single server not running returning version in time: %v", err) + } + + // Check service account name + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Single, saName, t) + + // Check server role + assert.NoError(t, client.SynchronizeEndpoints(ctx)) + role, err := client.ServerRole(ctx) + assert.NoError(t, err) + assert.Equal(t, driver.ServerRoleSingle, role) +} + +// TestServiceAccountActiveFailover tests the creating of a ActiveFailover server deployment +// with default settings using a custom service account. 
+func TestServiceAccountActiveFailover(t *testing.T) { + longOrSkip(t) + c := client.MustNewInCluster() + kubecli := mustNewKubeClient(t) + ns := getNamespace(t) + + // Prepare service account + namePrefix := "test-sa-rs-" + saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) + defer deleteServiceAccount(kubecli, saName, ns) + + // Prepare deployment config + depl := newDeployment(namePrefix + uniuri.NewLen(4)) + depl.Spec.Mode = api.NewMode(api.DeploymentModeActiveFailover) + depl.Spec.Single.ServiceAccountName = util.NewString(saName) + depl.Spec.Agents.ServiceAccountName = util.NewString(saName) + + // Create deployment + _, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl) + if err != nil { + t.Fatalf("Create deployment failed: %v", err) + } + // Prepare cleanup + defer removeDeployment(c, depl.GetName(), ns) + + // Wait for deployment to be ready + apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) + if err != nil { + t.Fatalf("Deployment not running in time: %v", err) + } + + // Create a database client + ctx := context.Background() + client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t) + + // Wait for single server available + if err := waitUntilVersionUp(client, nil); err != nil { + t.Fatalf("ActiveFailover servers not running returning version in time: %v", err) + } + + // Check service account name + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Single, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Agents, saName, t) + + // Check server role + assert.NoError(t, client.SynchronizeEndpoints(ctx)) + role, err := client.ServerRole(ctx) + assert.NoError(t, err) + assert.Equal(t, driver.ServerRoleSingleActive, role) +} + +// TestServiceAccountCluster tests the creating of a cluster deployment +// with default settings using a custom service account. 
+func TestServiceAccountCluster(t *testing.T) { + longOrSkip(t) + c := client.MustNewInCluster() + kubecli := mustNewKubeClient(t) + ns := getNamespace(t) + + // Prepare service account + namePrefix := "test-sa-cls-" + saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) + defer deleteServiceAccount(kubecli, saName, ns) + + // Prepare deployment config + depl := newDeployment(namePrefix + uniuri.NewLen(4)) + depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) + depl.Spec.Agents.ServiceAccountName = util.NewString(saName) + depl.Spec.DBServers.ServiceAccountName = util.NewString(saName) + depl.Spec.Coordinators.ServiceAccountName = util.NewString(saName) + + // Create deployment + _, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl) + if err != nil { + t.Fatalf("Create deployment failed: %v", err) + } + // Prepare cleanup + defer removeDeployment(c, depl.GetName(), ns) + + // Wait for deployment to be ready + apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) + if err != nil { + t.Fatalf("Deployment not running in time: %v", err) + } + + // Create a database client + ctx := context.Background() + client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t) + + // Wait for cluster to be available + if err := waitUntilVersionUp(client, nil); err != nil { + t.Fatalf("Cluster not running returning version in time: %v", err) + } + + // Check service account name + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Agents, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Coordinators, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.DBServers, saName, t) + + // Check server role + assert.NoError(t, client.SynchronizeEndpoints(ctx)) + role, err := client.ServerRole(ctx) + assert.NoError(t, err) + assert.Equal(t, driver.ServerRoleCoordinator, role) +} + +// TestServiceAccountClusterWithSync tests the creating of a cluster 
deployment +// with default settings and sync enabled using a custom service account. +func TestServiceAccountClusterWithSync(t *testing.T) { + longOrSkip(t) + img := getEnterpriseImageOrSkip(t) + c := client.MustNewInCluster() + kubecli := mustNewKubeClient(t) + ns := getNamespace(t) + + // Prepare service account + namePrefix := "test-sa-cls-sync-" + saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) + defer deleteServiceAccount(kubecli, saName, ns) + + // Prepare deployment config + depl := newDeployment(namePrefix + uniuri.NewLen(4)) + depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) + depl.Spec.Image = util.NewString(img) + depl.Spec.Sync.Enabled = util.NewBool(true) + depl.Spec.Agents.ServiceAccountName = util.NewString(saName) + depl.Spec.DBServers.ServiceAccountName = util.NewString(saName) + depl.Spec.Coordinators.ServiceAccountName = util.NewString(saName) + depl.Spec.SyncMasters.ServiceAccountName = util.NewString(saName) + depl.Spec.SyncWorkers.ServiceAccountName = util.NewString(saName) + + // Create deployment + _, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl) + if err != nil { + t.Fatalf("Create deployment failed: %v", err) + } + // Prepare cleanup + defer removeDeployment(c, depl.GetName(), ns) + + // Wait for deployment to be ready + apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) + if err != nil { + t.Fatalf("Deployment not running in time: %v", err) + } + + // Create a database client + ctx := context.Background() + client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t) + + // Wait for cluster to be available + if err := waitUntilVersionUp(client, nil); err != nil { + t.Fatalf("Cluster not running returning version in time: %v", err) + } + + // Create a syncmaster client + syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t) + + // Wait for syncmasters to be available + if err := waitUntilSyncVersionUp(syncClient, nil); err != nil { + 
t.Fatalf("SyncMasters not running returning version in time: %v", err) + } + + // Check service account name + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Agents, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Coordinators, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.DBServers, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.SyncMasters, saName, t) + checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.SyncWorkers, saName, t) + + // Check server role + assert.NoError(t, client.SynchronizeEndpoints(ctx)) + role, err := client.ServerRole(ctx) + assert.NoError(t, err) + assert.Equal(t, driver.ServerRoleCoordinator, role) +} + +// mustCreateServiceAccount creates an empty service account with random name and returns +// its name. On error, the test is failed. +func mustCreateServiceAccount(kubecli kubernetes.Interface, namePrefix, ns string, t *testing.T) string { + s := v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.ToLower(namePrefix + uniuri.NewLen(4)), + }, + } + if _, err := kubecli.CoreV1().ServiceAccounts(ns).Create(&s); err != nil { + t.Fatalf("Failed to create service account: %v", err) + } + return s.GetName() +} + +// deleteServiceAccount deletes a service account with given name in given namespace. +func deleteServiceAccount(kubecli kubernetes.Interface, name, ns string) error { + if err := kubecli.CoreV1().ServiceAccounts(ns).Delete(name, &metav1.DeleteOptions{}); err != nil { + return maskAny(err) + } + return nil +} + +// checkMembersUsingServiceAccount checks the serviceAccountName of the pods of all members +// to ensure that it is equal to the given serviceAccountName. 
+func checkMembersUsingServiceAccount(kubecli kubernetes.Interface, ns string, members []api.MemberStatus, serviceAccountName string, t *testing.T) { + pods := kubecli.CoreV1().Pods(ns) + for _, m := range members { + if p, err := pods.Get(m.PodName, metav1.GetOptions{}); err != nil { + t.Errorf("Failed to get pod for member '%s': %v", m.ID, err) + } else if p.Spec.ServiceAccountName != serviceAccountName { + t.Errorf("Expected pod '%s' to have serviceAccountName '%s', got '%s'", p.GetName(), serviceAccountName, p.Spec.ServiceAccountName) + } + } +}