Skip to content
This repository has been archived by the owner on Mar 28, 2020. It is now read-only.

Commit

Permalink
Add option for operator to act as cluster wide
Browse files Browse the repository at this point in the history
For a cluster to be managed cluster-wide, the user has to add the annotation:
  etcd.database.coreos.com/scope: clusterwide
And the admin has to run the operator with the "-cluster-wide" option.

The current implementation lacks locking when multiple cluster-wide
operators run in different namespaces.
  • Loading branch information
guilhem committed Feb 4, 2018
1 parent 51e0e7d commit 068fd64
Show file tree
Hide file tree
Showing 8 changed files with 132 additions and 6 deletions.
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -279,9 +279,11 @@ Follow the [etcd backup operator walkthrough](./doc/user/walkthrough/backup-oper

Follow the [etcd restore operator walkthrough](./doc/user/walkthrough/restore-operator.md) to restore an etcd cluster on Kubernetes from backup.

### Limitations
### Manage etcd clusters in all namespaces

See [instructions on clusterwide feature](doc/user/clusterwide.md).

- The etcd operator only manages the etcd cluster created in the same namespace. Users need to create multiple operators in different namespaces to manage etcd clusters in different namespaces.
### Limitations

- Migration, the process of allowing the etcd operator to manage existing etcd3 clusters, only supports a single-member cluster, with its node running in the same Kubernetes cluster.

Expand Down
4 changes: 4 additions & 0 deletions cmd/operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,8 @@ var (
printVersion bool

createCRD bool

clusterWide bool
)

func init() {
Expand All @@ -69,6 +71,7 @@ func init() {
flag.BoolVar(&printVersion, "version", false, "Show version and quit")
flag.BoolVar(&createCRD, "create-crd", true, "The operator will not create the EtcdCluster CRD when this flag is set to false.")
flag.DurationVar(&gcInterval, "gc-interval", 10*time.Minute, "GC interval")
flag.BoolVar(&clusterWide, "cluster-wide", false, "The operator will watch clusters in all namespaces")
flag.Parse()
}

Expand Down Expand Up @@ -161,6 +164,7 @@ func newControllerConfig() controller.Config {

cfg := controller.Config{
Namespace: namespace,
ClusterWide: clusterWide,
ServiceAccount: serviceAccount,
KubeCli: kubecli,
KubeExtCli: k8sutil.MustNewKubeExtClient(),
Expand Down
26 changes: 26 additions & 0 deletions doc/user/clusterwide.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Manage clusters in all namespaces

The default etcd operator behavior is to only manage etcd clusters created in the same namespace.
It is possible to deploy an etcd operator with a special option so that it manages etcd clusters cluster-wide.

## Install etcd operator

The etcd operator has to run with the `-cluster-wide` argument.

More information in the [install guide](install_guide.md).

## Special annotation

To declare an etcd cluster as "clusterwide", you have to add the special annotation `etcd.database.coreos.com/scope` with the value `clusterwide`.

```yaml
apiVersion: "etcd.database.coreos.com/v1beta2"
kind: "EtcdCluster"
metadata:
name: "example-etcd-cluster"
annotations:
etcd.database.coreos.com/scope: clusterwide
spec:
size: 3
version: "3.2.13"
```
2 changes: 2 additions & 0 deletions example/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ spec:
image: quay.io/coreos/etcd-operator:v0.8.1
command:
- etcd-operator
# Uncomment to act for resources in all namespaces. More information in doc/user/clusterwide.md
#- -cluster-wide
env:
- name: MY_POD_NAMESPACE
valueFrom:
Expand Down
5 changes: 5 additions & 0 deletions pkg/controller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ type Controller struct {

type Config struct {
Namespace string
ClusterWide bool
ServiceAccount string
KubeCli kubernetes.Interface
KubeExtCli apiextensionsclient.Interface
Expand All @@ -64,6 +65,10 @@ func New(cfg Config) *Controller {
func (c *Controller) handleClusterEvent(event *Event) error {
clus := event.Object

if !c.managed(clus) {
return fmt.Errorf("cluster (%s) isn't managed", clus.Name)
}

if clus.Status.IsFailed() {
clustersFailed.Inc()
if event.Type == kwatch.Deleted {
Expand Down
68 changes: 65 additions & 3 deletions pkg/controller/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,10 @@ import (
"strings"
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"

api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
"github.com/coreos/etcd-operator/pkg/cluster"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
)

func TestHandleClusterEventUpdateFailedCluster(t *testing.T) {
Expand Down Expand Up @@ -73,3 +72,66 @@ func TestHandleClusterEventDeleteFailedCluster(t *testing.T) {
t.Errorf("failed cluster not cleaned up after delete event, cluster struct: %v", c.clusters[name])
}
}

// TestHandleClusterEventClusterwide verifies that a cluster-wide operator
// accepts (does not reject as unmanaged) a cluster carrying the
// "etcd.database.coreos.com/scope: clusterwide" annotation.
func TestHandleClusterEventClusterwide(t *testing.T) {
	c := New(Config{ClusterWide: true})

	clus := &api.EtcdCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
			Annotations: map[string]string{
				"etcd.database.coreos.com/scope": "clusterwide",
			},
		},
	}
	e := &Event{
		Type:   watch.Modified,
		Object: clus,
	}
	err := c.handleClusterEvent(e)
	suffix := "isn't managed"
	// handleClusterEvent may legitimately return nil (or an unrelated error)
	// for a managed cluster; guard against calling Error() on a nil error.
	if err != nil && strings.HasSuffix(err.Error(), suffix) {
		t.Errorf("expect err to not end with %q, got=%v", suffix, err)
	}
}

// TestHandleClusterEventClusterwideIgnored verifies that a cluster-wide
// operator rejects a cluster that lacks the clusterwide scope annotation.
func TestHandleClusterEventClusterwideIgnored(t *testing.T) {
	c := New(Config{ClusterWide: true})

	clus := &api.EtcdCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
	}
	e := &Event{
		Type:   watch.Modified,
		Object: clus,
	}
	err := c.handleClusterEvent(e)
	suffix := "isn't managed"
	// Fail (instead of panicking on a nil error) if the expected
	// "isn't managed" error was not returned.
	if err == nil || !strings.HasSuffix(err.Error(), suffix) {
		t.Errorf("expect err ending with %q, got=%v", suffix, err)
	}
}

// TestHandleClusterEventNamespacedIgnored verifies that a namespaced
// (non-cluster-wide) operator rejects a cluster annotated as clusterwide.
func TestHandleClusterEventNamespacedIgnored(t *testing.T) {
	c := New(Config{})

	clus := &api.EtcdCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
			Annotations: map[string]string{
				"etcd.database.coreos.com/scope": "clusterwide",
			},
		},
	}
	e := &Event{
		Type:   watch.Modified,
		Object: clus,
	}
	err := c.handleClusterEvent(e)
	suffix := "isn't managed"
	// Fail (instead of panicking on a nil error) if the expected
	// "isn't managed" error was not returned.
	if err == nil || !strings.HasSuffix(err.Error(), suffix) {
		t.Errorf("expect err ending with %q, got=%v", suffix, err)
	}
}
24 changes: 23 additions & 1 deletion pkg/controller/informer.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,10 @@ import (
"time"

api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
"github.com/coreos/etcd-operator/pkg/util/constants"
"github.com/coreos/etcd-operator/pkg/util/probe"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
kwatch "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
Expand Down Expand Up @@ -52,10 +54,17 @@ func (c *Controller) Start() error {
}

func (c *Controller) run() {
var ns string
if c.Config.ClusterWide {
ns = metav1.NamespaceAll
} else {
ns = c.Config.Namespace
}

source := cache.NewListWatchFromClient(
c.Config.EtcdCRCli.EtcdV1beta2().RESTClient(),
api.EtcdClusterResourcePlural,
c.Config.Namespace,
ns,
fields.Everything())

_, informer := cache.NewIndexerInformer(source, &api.EtcdCluster{}, 0, cache.ResourceEventHandlerFuncs{
Expand Down Expand Up @@ -131,3 +140,16 @@ func (c *Controller) syncEtcdClus(clus *api.EtcdCluster) {
}
pt.stop()
}

// managed reports whether this controller is responsible for the given
// cluster. A cluster-wide controller manages only clusters explicitly
// annotated with scope "clusterwide"; a namespaced controller manages
// only clusters without a scope annotation.
func (c *Controller) managed(clus *api.EtcdCluster) bool {
	scope, annotated := clus.Annotations[constants.AnnotationScope]
	if c.Config.ClusterWide {
		return annotated && scope == constants.AnnotationClusterWide
	}
	return !annotated
}
3 changes: 3 additions & 0 deletions pkg/util/constants/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,4 +29,7 @@ const (

EnvOperatorPodName = "MY_POD_NAME"
EnvOperatorPodNamespace = "MY_POD_NAMESPACE"

AnnotationScope = "etcd.database.coreos.com/scope"
AnnotationClusterWide = "clusterwide"
)

0 comments on commit 068fd64

Please sign in to comment.