diff --git a/examples/nodeport-cluster.yaml b/examples/nodeport-cluster.yaml
new file mode 100644
index 000000000..f5765e61e
--- /dev/null
+++ b/examples/nodeport-cluster.yaml
@@ -0,0 +1,14 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: arangodb-exposed-cluster
+spec:
+  selector:
+    app: arangodb
+    role: coordinator
+  type: NodePort
+  ports:
+  - protocol: TCP
+    port: 8529
+    targetPort: 8529
+    nodePort: 31529
diff --git a/examples/nodeport-single-server.yaml b/examples/nodeport-single-server.yaml
new file mode 100644
index 000000000..60cc3f304
--- /dev/null
+++ b/examples/nodeport-single-server.yaml
@@ -0,0 +1,14 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: arangodb-exposed
+spec:
+  selector:
+    app: arangodb
+    role: single
+  type: NodePort
+  ports:
+  - protocol: TCP
+    port: 8529
+    targetPort: 8529
+    nodePort: 30529
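The two services above expose the cluster coordinators and the single server on fixed node ports 31529 and 30529. A minimal sketch of connecting through such a NodePort with the ArangoDB Go driver; the node IP and credentials are placeholders, and no TLS is assumed:

    package main

    import (
    	"context"
    	"fmt"

    	driver "github.com/arangodb/go-driver"
    	"github.com/arangodb/go-driver/http"
    )

    func main() {
    	// Any node's IP works; 31529 matches the nodePort in nodeport-cluster.yaml.
    	conn, err := http.NewConnection(http.ConnectionConfig{
    		Endpoints: []string{"http://203.0.113.10:31529"},
    	})
    	if err != nil {
    		panic(err)
    	}
    	c, err := driver.NewClient(driver.ClientConfig{
    		Connection:     conn,
    		Authentication: driver.BasicAuthentication("root", "password"), // placeholder credentials
    	})
    	if err != nil {
    		panic(err)
    	}
    	v, err := c.Version(context.Background())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("connected to ArangoDB", v.Version)
    }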
diff --git a/pkg/apis/arangodb/v1alpha/deployment_spec.go b/pkg/apis/arangodb/v1alpha/deployment_spec.go
index 76bc20a17..3c60569c8 100644
--- a/pkg/apis/arangodb/v1alpha/deployment_spec.go
+++ b/pkg/apis/arangodb/v1alpha/deployment_spec.go
@@ -197,7 +197,9 @@ func (s *SSLSpec) SetDefaults() {
 
 // SyncSpec holds dc2dc replication specific configuration settings
 type SyncSpec struct {
-	Enabled bool `json:"enabled,omitempty"`
+	Enabled         bool          `json:"enabled,omitempty"`
+	Image           string        `json:"image,omitempty"`
+	ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"`
 }
 
 // Validate the given spec
@@ -205,22 +207,31 @@ func (s SyncSpec) Validate(mode DeploymentMode) error {
 	if s.Enabled && !mode.SupportsSync() {
 		return maskAny(errors.Wrapf(ValidationError, "Cannot enable sync with mode: '%s'", mode))
 	}
+	if s.Enabled && s.Image == "" {
+		return maskAny(errors.Wrapf(ValidationError, "image must be set"))
+	}
 	return nil
 }
 
 // SetDefaults fills in missing defaults
-func (s *SyncSpec) SetDefaults() {
+func (s *SyncSpec) SetDefaults(defaultImage string, defaultPullPolicy v1.PullPolicy) {
+	if s.Image == "" {
+		s.Image = defaultImage
+	}
+	if s.ImagePullPolicy == "" {
+		s.ImagePullPolicy = defaultPullPolicy
+	}
 }
 
 type ServerGroup int
 
 const (
-	ServerGroupSingle       = 1
-	ServerGroupAgents       = 2
-	ServerGroupDBServers    = 3
-	ServerGroupCoordinators = 4
-	ServerGroupSyncMasters  = 5
-	ServerGroupSyncWorkers  = 6
+	ServerGroupSingle       ServerGroup = 1
+	ServerGroupAgents       ServerGroup = 2
+	ServerGroupDBServers    ServerGroup = 3
+	ServerGroupCoordinators ServerGroup = 4
+	ServerGroupSyncMasters  ServerGroup = 5
+	ServerGroupSyncWorkers  ServerGroup = 6
 )
 
 // AsRole returns the "role" value for the given group.
@@ -243,6 +254,26 @@ func (g ServerGroup) AsRole() string {
 	}
 }
 
+// IsArangod returns true when the group runs servers of type `arangod`.
+func (g ServerGroup) IsArangod() bool {
+	switch g {
+	case ServerGroupSingle, ServerGroupAgents, ServerGroupDBServers, ServerGroupCoordinators:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsArangosync returns true when the group runs servers of type `arangosync`.
+func (g ServerGroup) IsArangosync() bool {
+	switch g {
+	case ServerGroupSyncMasters, ServerGroupSyncWorkers:
+		return true
+	default:
+		return false
+	}
+}
+
 // ServerGroupSpec contains the specification for all servers in a specific group (e.g. all agents)
 type ServerGroupSpec struct {
 	// Count holds the requested number of servers
@@ -256,11 +287,14 @@ type ServerGroupSpec struct {
 }
 
 // Validate the given group spec
-func (s ServerGroupSpec) Validate(group ServerGroup, used bool) error {
+func (s ServerGroupSpec) Validate(group ServerGroup, used bool, mode DeploymentMode) error {
 	if used {
 		if s.Count < 1 {
 			return maskAny(errors.Wrapf(ValidationError, "Invalid count value %d. Expected >= 1", s.Count))
 		}
+		if s.Count > 1 && group == ServerGroupSingle && mode == DeploymentModeSingle {
+			return maskAny(errors.Wrapf(ValidationError, "Invalid count value %d. Expected 1", s.Count))
+		}
 	} else if s.Count != 0 {
 		return maskAny(errors.Wrapf(ValidationError, "Invalid count value %d for un-used group. Expected 0", s.Count))
 	}
@@ -268,11 +302,15 @@ func (s ServerGroupSpec) Validate(group ServerGroup, used bool) error {
 }
 
 // SetDefaults fills in missing defaults
-func (s *ServerGroupSpec) SetDefaults(group ServerGroup, used bool) {
+func (s *ServerGroupSpec) SetDefaults(group ServerGroup, used bool, mode DeploymentMode) {
 	if s.Count == 0 && used {
 		switch group {
 		case ServerGroupSingle:
-			s.Count = 1
+			if mode == DeploymentModeSingle {
+				s.Count = 1 // Single server
+			} else {
+				s.Count = 2 // Resilient single
+			}
 		default:
 			s.Count = 3
 		}
@@ -323,16 +361,19 @@ func (s *DeploymentSpec) SetDefaults() {
 	if s.Image == "" && s.IsDevelopment() {
 		s.Image = defaultImage
 	}
+	if s.ImagePullPolicy == "" {
+		s.ImagePullPolicy = v1.PullIfNotPresent
+	}
 	s.RocksDB.SetDefaults()
 	s.Authentication.SetDefaults()
 	s.SSL.SetDefaults()
-	s.Sync.SetDefaults()
-	s.Single.SetDefaults(ServerGroupSingle, s.Mode.HasSingleServers())
-	s.Agents.SetDefaults(ServerGroupAgents, s.Mode.HasAgents())
-	s.DBServers.SetDefaults(ServerGroupDBServers, s.Mode.HasDBServers())
-	s.Coordinators.SetDefaults(ServerGroupCoordinators, s.Mode.HasCoordinators())
-	s.SyncMasters.SetDefaults(ServerGroupSyncMasters, s.Sync.Enabled)
-	s.SyncWorkers.SetDefaults(ServerGroupSyncWorkers, s.Sync.Enabled)
+	s.Sync.SetDefaults(s.Image, s.ImagePullPolicy)
+	s.Single.SetDefaults(ServerGroupSingle, s.Mode.HasSingleServers(), s.Mode)
+	s.Agents.SetDefaults(ServerGroupAgents, s.Mode.HasAgents(), s.Mode)
+	s.DBServers.SetDefaults(ServerGroupDBServers, s.Mode.HasDBServers(), s.Mode)
+	s.Coordinators.SetDefaults(ServerGroupCoordinators, s.Mode.HasCoordinators(), s.Mode)
+	s.SyncMasters.SetDefaults(ServerGroupSyncMasters, s.Sync.Enabled, s.Mode)
+	s.SyncWorkers.SetDefaults(ServerGroupSyncWorkers, s.Sync.Enabled, s.Mode)
 }
 
 // Validate the specification.
@@ -365,22 +406,22 @@ func (s *DeploymentSpec) Validate() error {
 	if err := s.Sync.Validate(s.Mode); err != nil {
 		return maskAny(err)
 	}
-	if err := s.Single.Validate(ServerGroupSingle, s.Mode.HasSingleServers()); err != nil {
+	if err := s.Single.Validate(ServerGroupSingle, s.Mode.HasSingleServers(), s.Mode); err != nil {
 		return maskAny(err)
 	}
-	if err := s.Agents.Validate(ServerGroupAgents, s.Mode.HasAgents()); err != nil {
+	if err := s.Agents.Validate(ServerGroupAgents, s.Mode.HasAgents(), s.Mode); err != nil {
 		return maskAny(err)
 	}
-	if err := s.DBServers.Validate(ServerGroupDBServers, s.Mode.HasDBServers()); err != nil {
+	if err := s.DBServers.Validate(ServerGroupDBServers, s.Mode.HasDBServers(), s.Mode); err != nil {
 		return maskAny(err)
 	}
-	if err := s.Coordinators.Validate(ServerGroupCoordinators, s.Mode.HasCoordinators()); err != nil {
+	if err := s.Coordinators.Validate(ServerGroupCoordinators, s.Mode.HasCoordinators(), s.Mode); err != nil {
		return maskAny(err)
 	}
-	if err := s.SyncMasters.Validate(ServerGroupSyncMasters, s.Sync.Enabled); err != nil {
+	if err := s.SyncMasters.Validate(ServerGroupSyncMasters, s.Sync.Enabled, s.Mode); err != nil {
 		return maskAny(err)
 	}
-	if err := s.SyncWorkers.Validate(ServerGroupSyncWorkers, s.Sync.Enabled); err != nil {
+	if err := s.SyncWorkers.Validate(ServerGroupSyncWorkers, s.Sync.Enabled, s.Mode); err != nil {
 		return maskAny(err)
 	}
 	return nil
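Taken together, SetDefaults now derives the sync image and pull policy from the main spec before the per-group defaults run (the file is assumed to import k8s.io/api/core/v1 as v1 for the new PullPolicy fields). A hedged sketch of the resulting flow; DeploymentModeCluster and the exact field set are assumptions based on the hunks above:

    spec := api.DeploymentSpec{
    	Mode:  api.DeploymentModeCluster,
    	Image: "arangodb/arangodb:3.3",
    }
    spec.SetDefaults()
    // Expected after defaulting, per the code above:
    //   spec.ImagePullPolicy == v1.PullIfNotPresent
    //   spec.Sync.Image      == "arangodb/arangodb:3.3" (inherited from spec.Image)
    //   spec.Agents.Count    == 3
    if err := spec.Validate(); err != nil {
    	// e.g. Single.Count > 1 with DeploymentModeSingle is rejected here
    }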
diff --git a/pkg/deployment/deployment.go b/pkg/deployment/deployment.go
index 5f7999552..9aa0aa696 100644
--- a/pkg/deployment/deployment.go
+++ b/pkg/deployment/deployment.go
@@ -152,6 +152,12 @@ func (d *Deployment) run() {
 		return
 	}
 
+	// Create pods
+	if err := d.ensurePods(d.apiObject); err != nil {
+		d.failOnError(err, "Failed to create pods")
+		return
+	}
+
 	d.status.State = api.DeploymentStateRunning
 	if err := d.updateCRStatus(); err != nil {
 		log.Warn().Err(err).Msg("update initial CR status failed")
@@ -185,42 +191,6 @@ func (d *Deployment) handleUpdateEvent(event *deploymentEvent) error {
 	return nil
 }
 
-// createServices creates all services needed to service the given deployment
-func (d *Deployment) createServices(apiObject *api.ArangoDeployment) error {
-	log := d.deps.Log
-	kubecli := d.deps.KubeCli
-	owner := apiObject.AsOwner()
-
-	log.Debug().Msg("creating services...")
-
-	if _, err := k8sutil.CreateHeadlessService(kubecli, apiObject, owner); err != nil {
-		log.Debug().Err(err).Msg("Failed to create headless service")
-		return maskAny(err)
-	}
-	single := apiObject.Spec.Mode.HasSingleServers()
-	if svcName, err := k8sutil.CreateDatabaseClientService(kubecli, apiObject, single, owner); err != nil {
-		log.Debug().Err(err).Msg("Failed to create database client service")
-		return maskAny(err)
-	} else {
-		d.status.ServiceName = svcName
-		if err := d.updateCRStatus(); err != nil {
-			return maskAny(err)
-		}
-	}
-	if apiObject.Spec.Sync.Enabled {
-		if svcName, err := k8sutil.CreateSyncMasterClientService(kubecli, apiObject, owner); err != nil {
-			log.Debug().Err(err).Msg("Failed to create syncmaster client service")
-			return maskAny(err)
-		} else {
-			d.status.ServiceName = svcName
-			if err := d.updateCRStatus(); err != nil {
-				return maskAny(err)
-			}
-		}
-	}
-	return nil
-}
-
 // Update the status of the API object from the internal status
 func (d *Deployment) updateCRStatus() error {
 	if reflect.DeepEqual(d.apiObject.Status, d.status) {
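With this change the run() loop creates pods as the last provisioning step. Only the ensurePods block is part of this diff; the preceding calls are presumed to already exist in run(), so the assumed overall order in sketch form is:

    // Assumed provisioning order inside (d *Deployment).run():
    if err := d.createServices(d.apiObject); err != nil { // stable DNS identities first
    	d.failOnError(err, "Failed to create services")
    	return
    }
    if err := d.ensurePVCs(d.apiObject); err != nil { // storage bound before servers start
    	d.failOnError(err, "Failed to create PVCs")
    	return
    }
    if err := d.ensurePods(d.apiObject); err != nil { // pods mount the PVCs and join via DNS
    	d.failOnError(err, "Failed to create pods")
    	return
    }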
diff --git a/pkg/deployment/members.go b/pkg/deployment/members.go
index 73eb2f895..a72bd715a 100644
--- a/pkg/deployment/members.go
+++ b/pkg/deployment/members.go
@@ -142,27 +142,3 @@ func (d *Deployment) createMember(group api.ServerGroup, apiObject *api.ArangoDe
 
 	return nil
 }
-
-// ensurePVCs creates a PVC's listed in member status
-func (d *Deployment) ensurePVCs(apiObject *api.ArangoDeployment) error {
-	kubecli := d.deps.KubeCli
-	deploymentName := apiObject.GetName()
-	ns := apiObject.GetNamespace()
-	owner := apiObject.AsOwner()
-	if err := apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
-		for _, m := range *status {
-			if m.PersistentVolumeClaimName != "" {
-				storageClassName := spec.StorageClassName
-				role := group.AsRole()
-				resources := spec.Resources
-				if err := k8sutil.CreatePersistentVolumeClaim(kubecli, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, resources, owner); err != nil {
-					return maskAny(err)
-				}
-			}
-		}
-		return nil
-	}, &d.status); err != nil {
-		return maskAny(err)
-	}
-	return nil
-}
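ensurePVCs itself moves, essentially unchanged, into pkg/deployment/pvcs.go below. Both it and the new ensurePods iterate with ForeachServerGroup; a minimal usage sketch, assuming the callback signature seen in these hunks:

    // Count all known members across every server group.
    total := 0
    if err := apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
    	total += len(*status)
    	return nil
    }, &d.status); err != nil {
    	return maskAny(err)
    }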
diff --git a/pkg/deployment/pods.go b/pkg/deployment/pods.go
new file mode 100644
index 000000000..afa020172
--- /dev/null
+++ b/pkg/deployment/pods.go
@@ -0,0 +1,230 @@
+//
+// DISCLAIMER
+//
+// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package deployment
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+
+	api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha"
+
+	"github.com/arangodb/k8s-operator/pkg/util/k8sutil"
+)
+
+type optionPair struct {
+	Key   string
+	Value string
+}
+
+// createArangodArgs creates command line arguments for an arangod server in the given group.
+func (d *Deployment) createArangodArgs(apiObject *api.ArangoDeployment, group api.ServerGroup, spec api.ServerGroupSpec, agents api.MemberStatusList, id string) []string {
+	options := make([]optionPair, 0, 64)
+
+	// Endpoint
+	listenAddr := "[::]"
+	/*	if apiObject.Spec.Di.DisableIPv6 {
+		listenAddr = "0.0.0.0"
+	}*/
+	//scheme := NewURLSchemes(bsCfg.SslKeyFile != "").Arangod
+	scheme := "tcp"
+	options = append(options,
+		optionPair{"--server.endpoint", fmt.Sprintf("%s://%s:%d", scheme, listenAddr, k8sutil.ArangoPort)},
+	)
+
+	// Authentication
+	if apiObject.Spec.Authentication.JWTSecretName != "" {
+		// With authentication
+		options = append(options,
+			optionPair{"--server.authentication", "true"},
+			// TODO jwt-secret file
+		)
+	} else {
+		// Without authentication
+		options = append(options,
+			optionPair{"--server.authentication", "false"},
+		)
+	}
+
+	// Storage engine
+	options = append(options,
+		optionPair{"--server.storage-engine", string(apiObject.Spec.StorageEngine)},
+	)
+
+	// Logging
+	options = append(options,
+		optionPair{"--log.level", "INFO"},
+	)
+
+	// SSL
+	/*if bsCfg.SslKeyFile != "" {
+		sslSection := &configSection{
+			Name: "ssl",
+			Settings: map[string]string{
+				"keyfile": bsCfg.SslKeyFile,
+			},
+		}
+		if bsCfg.SslCAFile != "" {
+			sslSection.Settings["cafile"] = bsCfg.SslCAFile
+		}
+		config = append(config, sslSection)
+	}*/
+
+	// RocksDB
+	if apiObject.Spec.RocksDB.Encryption.KeySecretName != "" {
+		/*args = append(args,
+			fmt.Sprintf("--rocksdb.encryption-keyfile=%s", apiObject.Spec.StorageEngine),
+		)
+		rocksdbSection := &configSection{
+			Name: "rocksdb",
+			Settings: map[string]string{
+				"encryption-keyfile": bsCfg.RocksDBEncryptionKeyFile,
+			},
+		}
+		config = append(config, rocksdbSection)*/
+	}
+
+	options = append(options,
+		optionPair{"--database.directory", k8sutil.ArangodVolumeMountDir},
+		optionPair{"--log.output", "+"},
+	)
+	/*	if config.ServerThreads != 0 {
+		options = append(options,
+			optionPair{"--server.threads", strconv.Itoa(config.ServerThreads)})
+	}*/
+	/*if config.DebugCluster {
+		options = append(options,
+			optionPair{"--log.level", "startup=trace"})
+	}*/
+	myTCPURL := scheme + "://" + net.JoinHostPort(k8sutil.CreatePodDNSName(apiObject, group.AsRole(), id), strconv.Itoa(k8sutil.ArangoPort))
+	addAgentEndpoints := false
+	switch group {
+	case api.ServerGroupAgents:
+		options = append(options,
+			optionPair{"--cluster.my-id", id},
+			optionPair{"--agency.activate", "true"},
+			optionPair{"--agency.my-address", myTCPURL},
+			optionPair{"--agency.size", strconv.Itoa(apiObject.Spec.Agents.Count)},
+			optionPair{"--agency.supervision", "true"},
+			optionPair{"--foxx.queues", "false"},
+			optionPair{"--server.statistics", "false"},
+		)
+		for _, p := range agents {
+			if p.ID != id {
+				dnsName := k8sutil.CreatePodDNSName(apiObject, api.ServerGroupAgents.AsRole(), p.ID)
+				options = append(options,
+					optionPair{"--agency.endpoint", fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoPort)))},
+				)
+			}
+		}
+		/*if agentRecoveryID != "" {
+			options = append(options,
+				optionPair{"--agency.disaster-recovery-id", agentRecoveryID},
+			)
+		}*/
+	case api.ServerGroupDBServers:
+		addAgentEndpoints = true
+		options = append(options,
+			optionPair{"--cluster.my-id", id},
+			optionPair{"--cluster.my-address", myTCPURL},
+			optionPair{"--cluster.my-role", "PRIMARY"},
+			optionPair{"--foxx.queues", "false"},
+			optionPair{"--server.statistics", "true"},
+		)
+	case api.ServerGroupCoordinators:
+		addAgentEndpoints = true
+		options = append(options,
+			optionPair{"--cluster.my-id", id},
+			optionPair{"--cluster.my-address", myTCPURL},
+			optionPair{"--cluster.my-role", "COORDINATOR"},
+			optionPair{"--foxx.queues", "true"},
+			optionPair{"--server.statistics", "true"},
+		)
+	case api.ServerGroupSingle:
+		options = append(options,
+			optionPair{"--foxx.queues", "true"},
+			optionPair{"--server.statistics", "true"},
+		)
+		if apiObject.Spec.Mode == api.DeploymentModeResilientSingle {
+			addAgentEndpoints = true
+			options = append(options,
+				optionPair{"--replication.automatic-failover", "true"},
+				optionPair{"--cluster.my-id", id},
+				optionPair{"--cluster.my-address", myTCPURL},
+				optionPair{"--cluster.my-role", "SINGLE"},
+			)
+		}
+	}
+	if addAgentEndpoints {
+		for _, p := range agents {
+			dnsName := k8sutil.CreatePodDNSName(apiObject, api.ServerGroupAgents.AsRole(), p.ID)
+			options = append(options,
+				optionPair{"--cluster.agency-endpoint",
+					fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoPort)))},
+			)
+		}
+	}
+
+	args := make([]string, 0, len(options)+len(spec.Args))
+	for _, o := range options {
+		args = append(args, o.Key+"="+o.Value)
+	}
+	args = append(args, spec.Args...)
+
+	return args
+}
+
+// createArangoSyncArgs creates command line arguments for an arangosync server in the given group.
+func (d *Deployment) createArangoSyncArgs(apiObject *api.ArangoDeployment, group api.ServerGroup, spec api.ServerGroupSpec, agents api.MemberStatusList, id string) []string {
+	// TODO
+	return nil
+}
+
+// ensurePods creates all Pods listed in member status
+func (d *Deployment) ensurePods(apiObject *api.ArangoDeployment) error {
+	kubecli := d.deps.KubeCli
+	owner := apiObject.AsOwner()
+
+	if err := apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
+		for _, m := range *status {
+			role := group.AsRole()
+			if group.IsArangod() {
+				args := d.createArangodArgs(apiObject, group, spec, d.status.Members.Agents, m.ID)
+				env := make(map[string]string)
+				if err := k8sutil.CreateArangodPod(kubecli, apiObject, role, m.ID, m.PersistentVolumeClaimName, apiObject.Spec.Image, apiObject.Spec.ImagePullPolicy, args, env, owner); err != nil {
+					return maskAny(err)
+				}
+			} else if group.IsArangosync() {
+				args := d.createArangoSyncArgs(apiObject, group, spec, d.status.Members.Agents, m.ID)
+				env := make(map[string]string)
+				if err := k8sutil.CreateArangoSyncPod(kubecli, apiObject, role, m.ID, apiObject.Spec.Sync.Image, apiObject.Spec.Sync.ImagePullPolicy, args, env, owner); err != nil {
+					return maskAny(err)
+				}
+			}
+		}
+		return nil
+	}, &d.status); err != nil {
+		return maskAny(err)
+	}
+	return nil
+}
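For a concrete picture, here is roughly what createArangodArgs would produce for the first of three agents. The member IDs and the pod/service DNS names are made up, since the exact CreatePodName and CreateHeadlessServiceName formats are not part of this diff:

    args := []string{
    	"--server.endpoint=tcp://[::]:8529",
    	"--server.authentication=false",
    	"--server.storage-engine=rocksdb",
    	"--log.level=INFO",
    	"--database.directory=/data",
    	"--log.output=+",
    	"--cluster.my-id=agnt-1",
    	"--agency.activate=true",
    	"--agency.my-address=tcp://example-agent-agnt-1.example-int.default.svc:8529",
    	"--agency.size=3",
    	"--agency.supervision=true",
    	"--foxx.queues=false",
    	"--server.statistics=false",
    	"--agency.endpoint=tcp://example-agent-agnt-2.example-int.default.svc:8529",
    	"--agency.endpoint=tcp://example-agent-agnt-3.example-int.default.svc:8529",
    }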
diff --git a/pkg/deployment/pvcs.go b/pkg/deployment/pvcs.go
new file mode 100644
index 000000000..4847e2a14
--- /dev/null
+++ b/pkg/deployment/pvcs.go
@@ -0,0 +1,54 @@
+//
+// DISCLAIMER
+//
+// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package deployment
+
+import (
+	api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha"
+
+	"github.com/arangodb/k8s-operator/pkg/util/k8sutil"
+)
+
+// ensurePVCs creates all PVCs listed in member status
+func (d *Deployment) ensurePVCs(apiObject *api.ArangoDeployment) error {
+	kubecli := d.deps.KubeCli
+	deploymentName := apiObject.GetName()
+	ns := apiObject.GetNamespace()
+	owner := apiObject.AsOwner()
+
+	if err := apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
+		for _, m := range *status {
+			if m.PersistentVolumeClaimName != "" {
+				storageClassName := spec.StorageClassName
+				role := group.AsRole()
+				resources := spec.Resources
+				if err := k8sutil.CreatePersistentVolumeClaim(kubecli, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, resources, owner); err != nil {
+					return maskAny(err)
+				}
+			}
+		}
+		return nil
+	}, &d.status); err != nil {
+		return maskAny(err)
+	}
+	return nil
+}
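The spec.Resources passed through here is a standard Kubernetes ResourceRequirements. A sketch of what a group spec would presumably carry to request an 8Gi volume per member (the size is illustrative):

    import (
    	"k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    )

    resources := v1.ResourceRequirements{
    	Requests: v1.ResourceList{
    		v1.ResourceStorage: resource.MustParse("8Gi"),
    	},
    }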
diff --git a/pkg/deployment/services.go b/pkg/deployment/services.go
new file mode 100644
index 000000000..817685a45
--- /dev/null
+++ b/pkg/deployment/services.go
@@ -0,0 +1,64 @@
+//
+// DISCLAIMER
+//
+// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package deployment
+
+import (
+	api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha"
+	"github.com/arangodb/k8s-operator/pkg/util/k8sutil"
+)
+
+// createServices creates all services needed to service the given deployment
+func (d *Deployment) createServices(apiObject *api.ArangoDeployment) error {
+	log := d.deps.Log
+	kubecli := d.deps.KubeCli
+	owner := apiObject.AsOwner()
+
+	log.Debug().Msg("creating services...")
+
+	if _, err := k8sutil.CreateHeadlessService(kubecli, apiObject, owner); err != nil {
+		log.Debug().Err(err).Msg("Failed to create headless service")
+		return maskAny(err)
+	}
+	single := apiObject.Spec.Mode.HasSingleServers()
+	if svcName, err := k8sutil.CreateDatabaseClientService(kubecli, apiObject, single, owner); err != nil {
+		log.Debug().Err(err).Msg("Failed to create database client service")
+		return maskAny(err)
+	} else {
+		d.status.ServiceName = svcName
+		if err := d.updateCRStatus(); err != nil {
+			return maskAny(err)
+		}
+	}
+	if apiObject.Spec.Sync.Enabled {
+		if svcName, err := k8sutil.CreateSyncMasterClientService(kubecli, apiObject, owner); err != nil {
+			log.Debug().Err(err).Msg("Failed to create syncmaster client service")
+			return maskAny(err)
+		} else {
+			d.status.ServiceName = svcName
+			if err := d.updateCRStatus(); err != nil {
+				return maskAny(err)
+			}
+		}
+	}
+	return nil
+}
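Note that when sync is enabled, the second assignment wins and d.status.ServiceName ends up naming the syncmaster client service rather than the database client service. In-cluster clients can still derive an endpoint from whichever service is recorded; a sketch, with the scheme and standard service DNS form assumed:

    endpoint := fmt.Sprintf("http://%s.%s.svc:%d",
    	d.status.ServiceName, apiObject.GetNamespace(), k8sutil.ArangoPort)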
diff --git a/pkg/util/k8sutil/dns.go b/pkg/util/k8sutil/dns.go
new file mode 100644
index 000000000..dd2fd3f75
--- /dev/null
+++ b/pkg/util/k8sutil/dns.go
@@ -0,0 +1,35 @@
+//
+// DISCLAIMER
+//
+// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package k8sutil
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CreatePodDNSName returns the DNS name of a pod with a given role & id in
+// a given deployment.
+func CreatePodDNSName(deployment metav1.Object, role, id string) string {
+	return CreatePodName(deployment.GetName(), role, id) + "." +
+		CreateHeadlessServiceName(deployment.GetName()) + "." +
+		deployment.GetNamespace() + ".svc"
+}
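So for a deployment named "example" in namespace "default", an agent with ID "agnt-1" would get a pod DNS name like the one below, assuming CreatePodName and CreateHeadlessServiceName (defined elsewhere) produce "example-agent-agnt-1" and "example-int" respectively; both formats are assumptions:

    name := k8sutil.CreatePodDNSName(apiObject, api.ServerGroupAgents.AsRole(), "agnt-1")
    // name == "example-agent-agnt-1.example-int.default.svc" (hypothetical formats)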
diff --git a/pkg/util/k8sutil/pods.go b/pkg/util/k8sutil/pods.go
index a9bfd0962..488180471 100644
--- a/pkg/util/k8sutil/pods.go
+++ b/pkg/util/k8sutil/pods.go
@@ -25,11 +25,12 @@ package k8sutil
 import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 )
 
 const (
 	arangodVolumeName     = "arangod-data"
-	arangodVolumeMountDir = "/data"
+	ArangodVolumeMountDir = "/data"
 )
 
 // CreatePodName returns the name of the pod for a member with
@@ -41,16 +42,17 @@ func CreatePodName(deploymentName, role, id string) string {
 
 // arangodVolumeMounts creates a volume mount structure for arangod.
 func arangodVolumeMounts() []v1.VolumeMount {
 	return []v1.VolumeMount{
-		{Name: arangodVolumeName, MountPath: arangodVolumeMountDir},
+		{Name: arangodVolumeName, MountPath: ArangodVolumeMountDir},
 	}
 }
 
 // arangodContainer creates a container configured to run `arangod`.
-func arangodContainer(name string, args []string, image string) v1.Container {
+func arangodContainer(name string, image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]string) v1.Container {
 	c := v1.Container{
-		Command: append([]string{"/usr/sbin/arangod"}, args...),
-		Name:    name,
-		Image:   image,
+		Command:         append([]string{"/usr/sbin/arangod"}, args...),
+		Name:            name,
+		Image:           image,
+		ImagePullPolicy: imagePullPolicy,
 		Ports: []v1.ContainerPort{
 			{
 				Name:          "server",
@@ -60,24 +62,121 @@ func arangodContainer(name string, args []string, image string) v1.Container {
 		},
 		VolumeMounts: arangodVolumeMounts(),
 	}
+	for k, v := range env {
+		c.Env = append(c.Env, v1.EnvVar{
+			Name:  k,
+			Value: v,
+		})
+	}
 
 	return c
 }
 
-// arangodPod creates a container configured to run `arangod`.
-func arangodPod(clusterName, name string, args []string, image string) v1.Pod {
+// arangosyncContainer creates a container configured to run `arangosync`.
+func arangosyncContainer(name string, image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]string) v1.Container {
+	c := v1.Container{
+		Command:         append([]string{"/usr/sbin/arangosync"}, args...),
+		Name:            name,
+		Image:           image,
+		ImagePullPolicy: imagePullPolicy,
+		Ports: []v1.ContainerPort{
+			{
+				Name:          "server",
+				ContainerPort: int32(ArangoPort),
+				Protocol:      v1.ProtocolTCP,
+			},
+		},
+	}
+	for k, v := range env {
+		c.Env = append(c.Env, v1.EnvVar{
+			Name:  k,
+			Value: v,
+		})
+	}
+
+	return c
+}
+
+// newPod creates a basic Pod for given settings.
+func newPod(deploymentName, ns, role, id string) v1.Pod {
+	name := CreatePodName(deploymentName, role, id)
 	p := v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
+			Name:   name,
+			Labels: LabelsForDeployment(deploymentName, role),
 		},
 		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				arangodContainer(name, args, image),
-			},
 			Hostname:  name,
-			Subdomain: clusterName,
+			Subdomain: CreateHeadlessServiceName(deploymentName),
 		},
 	}
-
 	return p
 }
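A usage sketch of the new pod helper that follows: an empty pvcName makes the data volume fall back to an EmptyDir, and a pre-existing pod is treated as success. All values below are placeholders, and the env var is only illustrative:

    env := map[string]string{"ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY": "1G"} // hypothetical
    err := k8sutil.CreateArangodPod(kubecli, apiObject, "agent", "agnt-1",
    	"", // no PVC name -> data volume becomes an EmptyDir
    	"arangodb/arangodb:3.3", v1.PullIfNotPresent, args, env, apiObject.AsOwner())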
+
+// CreateArangodPod creates a Pod that runs `arangod`.
+// If the pod already exists, nil is returned.
+// If another error occurs, that error is returned.
+func CreateArangodPod(kubecli kubernetes.Interface, deployment metav1.Object, role, id, pvcName, image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]string, owner metav1.OwnerReference) error {
+	// Prepare basic pod
+	p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id)
+
+	// Add arangod container
+	c := arangodContainer(p.GetName(), image, imagePullPolicy, args, env)
+	p.Spec.Containers = append(p.Spec.Containers, c)
+
+	// Add volume
+	if pvcName != "" {
+		// Create PVC
+		vol := v1.Volume{
+			Name: arangodVolumeName,
+			VolumeSource: v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+					ClaimName: pvcName,
+				},
+			},
+		}
+		p.Spec.Volumes = append(p.Spec.Volumes, vol)
+	} else {
+		// Create emptydir volume
+		vol := v1.Volume{
+			Name: arangodVolumeName,
+			VolumeSource: v1.VolumeSource{
+				EmptyDir: &v1.EmptyDirVolumeSource{},
+			},
+		}
+		p.Spec.Volumes = append(p.Spec.Volumes, vol)
+	}
+
+	if err := createPod(kubecli, &p, deployment.GetNamespace(), owner); err != nil {
+		return maskAny(err)
+	}
+	return nil
+}
+
+// CreateArangoSyncPod creates a Pod that runs `arangosync`.
+// If the pod already exists, nil is returned.
+// If another error occurs, that error is returned.
+func CreateArangoSyncPod(kubecli kubernetes.Interface, deployment metav1.Object, role, id, image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]string, owner metav1.OwnerReference) error {
+	// Prepare basic pod
+	p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id)
+
+	// Add arangosync container
+	c := arangosyncContainer(p.GetName(), image, imagePullPolicy, args, env)
+	p.Spec.Containers = append(p.Spec.Containers, c)
+
+	if err := createPod(kubecli, &p, deployment.GetNamespace(), owner); err != nil {
+		return maskAny(err)
+	}
+	return nil
+}
+
+// createPod adds an owner to the given pod and calls the k8s api-server to create it.
+// If the pod already exists, nil is returned.
+// If another error occurs, that error is returned.
+func createPod(kubecli kubernetes.Interface, pod *v1.Pod, ns string, owner metav1.OwnerReference) error {
+	addOwnerRefToObject(pod.GetObjectMeta(), owner)
+	if _, err := kubecli.CoreV1().Pods(ns).Create(pod); err != nil && !IsAlreadyExists(err) {
+		return maskAny(err)
+	}
+	return nil
+}
diff --git a/pkg/util/k8sutil/pvc.go b/pkg/util/k8sutil/pvc.go
index c15b8791f..1fb2d0393 100644
--- a/pkg/util/k8sutil/pvc.go
+++ b/pkg/util/k8sutil/pvc.go
@@ -49,11 +49,13 @@ func CreatePersistentVolumeClaim(kubecli kubernetes.Interface, pvcName, deployme
 			AccessModes: []v1.PersistentVolumeAccessMode{
 				v1.ReadWriteOnce,
 			},
-			VolumeMode:       &volumeMode,
-			StorageClassName: &storageClassName,
-			Resources:        resources,
+			VolumeMode: &volumeMode,
+			Resources:  resources,
 		},
 	}
+	if storageClassName != "" {
+		pvc.Spec.StorageClassName = &storageClassName
+	}
 	addOwnerRefToObject(pvc.GetObjectMeta(), owner)
 	if _, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Create(pvc); err != nil && !IsAlreadyExists(err) {
 		return maskAny(err)
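With this last change an empty storageClassName leaves pvc.Spec.StorageClassName nil, so Kubernetes falls back to the cluster's default StorageClass instead of looking for a class literally named "". A usage sketch; all names are placeholders:

    // "" -> cluster default StorageClass; "fast-ssd" -> that specific class.
    if err := k8sutil.CreatePersistentVolumeClaim(kubecli,
    	"example-agent-agnt-1", "example", "default",
    	"", "agent", resources, owner); err != nil {
    	return maskAny(err)
    }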