From b55daf7cf163517421432dad78d9c37d6f118841 Mon Sep 17 00:00:00 2001 From: Rebecca Zanzig Date: Fri, 10 Jan 2020 13:48:54 -0800 Subject: [PATCH] Update consul-k8s components to support Consul namespaces This was a major change to the internals of most of the consul-k8s commands. As part of the work, there were other changes that affect folks not using namespaces as well. Details are broken down by process. --> Catalog Sync Namespaces: This allows the catalog sync process to support Consul namespaces, an Enterprise feature. It supports no namespaces (OSS), syncing k8s services into a single Consul namespace and mirroring k8s namespaces in Consul with an optional prefix. Beyond namespaces: It updates the settings for which k8s namespaces to sync. These are now based on allow and deny lists, rather than the two previous options of (1) a single k8s namespace, or (2) all k8s namespaces except `kube-system`. This change is backwards compatible, however if a user upgrades consul-k8s without upgrading the Helm chart as well, there will be a slight difference in behavior for (2) in that it won't automatically exclude `kube-system` on its own. The underlying call to Consul to retrieve services has been switched to retrieve services by the synthetic node `k8s-sync`. This causes a slight behavior change in that we will no longer remove services with the `ConsulK8STag` if it's not attached to the `k8s-sync` node. Fixes a hot loop bug when getting an error from Consul when retrieving service information. Moves `c.sigCh` initialization to the init method to fix a race condition occurring in tests. Adds additional debug logging to resource.go and syncer.go. --> ACL Bootstrapping Namespaces: Updates all policies that are created by the bootstrapper to include namespace permissions as needed. Updates the Connect Injector's AuthMethod to reflect the namespace registration settings (single destination, mirroring, mirroring with prefix). 
When namespaces are enabled, all policies and tokens for consul-k8s components are being created within the `Consul` default namespace. This is required for any cross-namespace permissions, and in the case of catalog sync and the connect injector, the ability to create Consul namespaces. Additionally, a specific cross-namespace policy is created so that it can be attached to all created namespaces to allow service discovery between Consul namespaces. This makes sure all policies are updated if the acl bootstrapping job is rerun, which happens on a helm upgrade. This allows someone upgrading to a version that includes namespaces or changes their namespacing config to also update the policies associated with their acl tokens to reflect that change. Beyond namespaces: This separates auth method and binding rule checking logic. If it exists already, binding rules are now always updated, which supports config updates. To make it easier to work with the code, it now uses a shared logger and has been split into smaller files. Updates mesh gateway acl policies with the correct permissions --> Connect Injector Namespaces: This adds namespace config options for registering injected services into a single namespace as well as mirroring k8s namespaces in Consul with an optional prefix. It adds functionality to check for Consul namespace existence and create new namespaces. Service and proxy registration as well as service-defaults have been updated to be namespace aware. Adds additional parsing of the upstream annotation to support namespaces. The format of the annotation becomes: `service_name.namespace:port:optional_datacenter` The `service_name.namespace` is only parsed if namespaces are enabled. If someone has added a `.namespace` in that case, the upstream will not work correctly, as is the case where someone has put in an incorrect service name, port or datacenter. The upstream definitions in the service registration file includes the namespace from the annotation. 
If it wasn't present in the annotation, no namespace is included. This will automatically fallback to assuming the service is in the same namespace as the service defining the upstream. Beyond namespaces: Updates the default envoy version to 1.13.0. --- .circleci/config.yml | 35 +- Makefile | 4 + .../to-consul/consul_node_services_client.go | 114 +++ .../consul_node_services_client_ent_test.go | 362 +++++++++ .../consul_node_services_client_test.go | 184 +++++ catalog/to-consul/resource.go | 111 ++- catalog/to-consul/resource_test.go | 626 ++++++++------- catalog/to-consul/syncer.go | 342 +++++--- catalog/to-consul/syncer_ent_test.go | 178 +++++ catalog/to-consul/syncer_test.go | 242 +++--- connect-inject/container_init.go | 97 ++- connect-inject/container_init_test.go | 584 +++++++++++++- connect-inject/envoy_sidecar.go | 21 +- connect-inject/envoy_sidecar_test.go | 74 +- connect-inject/handler.go | 169 +++- connect-inject/handler_ent_test.go | 511 ++++++++++++ connect-inject/handler_test.go | 303 +++++++- go.mod | 10 +- go.sum | 80 +- subcommand/inject-connect/command.go | 148 +++- subcommand/inject-connect/command_test.go | 2 +- subcommand/lifecycle-sidecar/command.go | 52 +- .../lifecycle-sidecar/command_ent_test.go | 89 +++ subcommand/lifecycle-sidecar/command_test.go | 77 +- subcommand/server-acl-init/command.go | 706 ++++------------- .../server-acl-init/command_ent_test.go | 583 ++++++++++++++ subcommand/server-acl-init/command_test.go | 332 +++++--- subcommand/server-acl-init/connect_inject.go | 219 ++++++ .../server-acl-init/create_or_update.go | 157 ++++ subcommand/server-acl-init/dns.go | 43 + subcommand/server-acl-init/rules.go | 176 +++++ subcommand/server-acl-init/rules_test.go | 299 +++++++ subcommand/server-acl-init/servers.go | 258 ++++++ subcommand/sync-catalog/command.go | 145 +++- subcommand/sync-catalog/command_ent_test.go | 733 ++++++++++++++++++ subcommand/sync-catalog/command_test.go | 303 ++++++++ 36 files changed, 6959 insertions(+), 1410 
deletions(-) create mode 100644 catalog/to-consul/consul_node_services_client.go create mode 100644 catalog/to-consul/consul_node_services_client_ent_test.go create mode 100644 catalog/to-consul/consul_node_services_client_test.go create mode 100644 catalog/to-consul/syncer_ent_test.go create mode 100644 connect-inject/handler_ent_test.go create mode 100644 subcommand/lifecycle-sidecar/command_ent_test.go create mode 100644 subcommand/server-acl-init/command_ent_test.go create mode 100644 subcommand/server-acl-init/connect_inject.go create mode 100644 subcommand/server-acl-init/create_or_update.go create mode 100644 subcommand/server-acl-init/dns.go create mode 100644 subcommand/server-acl-init/rules.go create mode 100644 subcommand/server-acl-init/rules_test.go create mode 100644 subcommand/server-acl-init/servers.go create mode 100644 subcommand/sync-catalog/command_ent_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index c63bd00acf..356055de29 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,8 @@ executors: - image: circleci/golang:1.13 environment: - TEST_RESULTS: /tmp/test-results # path to where test results are saved - - CONSUL_VERSION: 1.6.3 # this can be OSS or enterprise, e.g., 1.7.0+ent-beta4 + - CONSUL_VERSION: 1.7.0 # Consul's OSS version to use in tests + - CONSUL_ENT_VERSION: 1.7.0+ent # Consul's enterprise version to use in tests jobs: go-fmt-and-vet: @@ -67,6 +68,34 @@ jobs: - store_artifacts: path: /tmp/test-results + test_enterprise: + executor: go + environment: + TEST_RESULTS: /tmp/test-results + parallelism: 1 + steps: + - checkout + - run: mkdir -p $TEST_RESULTS + + # Restore go module cache if there is one + - restore_cache: + keys: + - consul-k8s-modcache-v1-{{ checksum "go.mod" }} + + # run go tests with gotestsum + - run: | + # download and install the consul binary + wget https://releases.hashicorp.com/consul/"${CONSUL_ENT_VERSION}"/consul_"${CONSUL_ENT_VERSION}"_linux_amd64.zip && \ + unzip 
consul_"${CONSUL_ENT_VERSION}"_linux_amd64.zip -d /home/circleci/bin && + rm consul_"${CONSUL_ENT_VERSION}"_linux_amd64.zip + - run: | + PACKAGE_NAMES=$(go list ./...) + gotestsum --junitfile $TEST_RESULTS/gotestsum-report.xml -- -tags=enterprise -p 4 $PACKAGE_NAMES + - store_test_results: + path: /tmp/test-results + - store_artifacts: + path: /tmp/test-results + build-distros: executor: go environment: @@ -94,6 +123,10 @@ workflows: - test: requires: - go-fmt-and-vet + - test_enterprise: + requires: + - go-fmt-and-vet - build-distros: requires: - test + - test_enterprise diff --git a/Makefile b/Makefile index 94218e85a2..cefbad8be8 100644 --- a/Makefile +++ b/Makefile @@ -86,6 +86,10 @@ dev-tree: test: go test ./... +# requires a consul enterprise binary on the path +ent-test: + go test ./... -tags=enterprise + cov: go test ./... -coverprofile=coverage.out go tool cover -html=coverage.out diff --git a/catalog/to-consul/consul_node_services_client.go b/catalog/to-consul/consul_node_services_client.go new file mode 100644 index 0000000000..1e3d01003f --- /dev/null +++ b/catalog/to-consul/consul_node_services_client.go @@ -0,0 +1,114 @@ +package catalog + +import ( + "fmt" + + "github.com/hashicorp/consul/api" +) + +// ConsulService is service registered in Consul. +type ConsulService struct { + // Namespace is the Consul namespace the service is registered in. + // If namespaces are disabled this will always be the empty string even + // though the namespace is technically "default". + Namespace string + // Name is the name of the service in Consul. + Name string +} + +// ConsulNodeServicesClient is used to query for node services. +type ConsulNodeServicesClient interface { + // NodeServices returns consul services with the corresponding tag + // registered to the Consul node with nodeName. opts is used as the + // query options in the API call to consul. It returns the list of services + // (not service instances) and the query meta from the API call. 
+ NodeServices(tag string, nodeName string, opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) +} + +// PreNamespacesNodeServicesClient implements ConsulNodeServicesClient +// for Consul < 1.7 which does not support namespaces. +type PreNamespacesNodeServicesClient struct { + Client *api.Client +} + +// NodeServices returns Consul services tagged with +// tag registered on nodeName using a Consul API that is supported in +// Consul versions before 1.7. Consul versions after 1.7 still support +// this API but the API is not namespace-aware. +func (s *PreNamespacesNodeServicesClient) NodeServices( + tag string, + nodeName string, + opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) { + // NOTE: We're not using tag filtering here so we can support Consul + // < 1.5. + node, meta, err := s.Client.Catalog().Node(nodeName, &opts) + if err != nil { + return nil, nil, err + } + if node == nil { + return nil, meta, nil + } + + var svcs []ConsulService + // seenServices is used to ensure the svcs list is unique. + seenServices := make(map[string]bool) + for _, svcInstance := range node.Services { + svcName := svcInstance.Service + if _, ok := seenServices[svcName]; ok { + continue + } + for _, svcTag := range svcInstance.Tags { + if svcTag == tag { + if _, ok := seenServices[svcName]; !ok { + svcs = append(svcs, ConsulService{ + // If namespaces are not enabled we use empty + // string. + Namespace: "", + Name: svcName, + }) + seenServices[svcName] = true + } + break + } + } + } + return svcs, meta, nil +} + +// NamespacesNodeServicesClient implements ConsulNodeServicesClient +// for Consul >= 1.7 which supports namespaces. +type NamespacesNodeServicesClient struct { + Client *api.Client +} + +// NodeServices returns Consul services tagged with +// tag registered on nodeName using a Consul API that is supported in +// Consul versions >= 1.7. If opts.Namespace is set to +// "*", services from all namespaces will be returned. 
+func (s *NamespacesNodeServicesClient) NodeServices( + tag string, + nodeName string, + opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) { + opts.Filter = fmt.Sprintf("\"%s\" in Tags", tag) + nodeCatalog, meta, err := s.Client.Catalog().NodeServiceList(nodeName, &opts) + if err != nil { + return nil, nil, err + } + + var svcs []ConsulService + // seenServices is used to ensure the svcs list is unique. Its keys are + // /. + seenSvcs := make(map[string]bool) + for _, svcInstance := range nodeCatalog.Services { + svcName := svcInstance.Service + key := fmt.Sprintf("%s/%s", svcInstance.Namespace, svcName) + if _, ok := seenSvcs[key]; !ok { + svcs = append(svcs, ConsulService{ + Namespace: svcInstance.Namespace, + Name: svcName, + }) + seenSvcs[key] = true + } + } + return svcs, meta, nil +} diff --git a/catalog/to-consul/consul_node_services_client_ent_test.go b/catalog/to-consul/consul_node_services_client_ent_test.go new file mode 100644 index 0000000000..d4238e9118 --- /dev/null +++ b/catalog/to-consul/consul_node_services_client_ent_test.go @@ -0,0 +1,362 @@ +// +build enterprise + +package catalog + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" +) + +// Test the Consul 1.7 client against Consul Enterprise. 
+func TestNamespacesNodeServicesClient_NodeServices(t *testing.T) { + t.Parallel() + cases := map[string]struct { + ConsulServices []api.CatalogRegistration + Exp []ConsulService + }{ + "no services": { + ConsulServices: nil, + Exp: nil, + }, + "no services on k8s node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + }, + }, + }, + Exp: nil, + }, + "service with k8s tag on different node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without any tags": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: nil, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"not-k8s", "foo"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node with k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + }, + }, + "multiple services": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc1-id", + Service: "svc1", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + 
Namespace: "default", + Name: "svc1", + }, + { + Namespace: "default", + Name: "svc2", + }, + }, + }, + "multiple service instances": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + }, + }, + "services across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-ns-id", + Service: "svc-ns", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc-ns", + }, + }, + }, + "services with same name across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc", + }, + }, + }, + "multiple services across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: 
"127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id1", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id1", + Service: "svc2", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "default", + Name: "svc2", + }, + { + Namespace: "ns", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc2", + }, + }, + }, + } + + for name, c := range cases { + if name != "multiple services across multiple namespaces" { + continue + } + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + svr, err := testutil.NewTestServerT(tt) + require.NoError(err) + defer svr.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: svr.HTTPAddr, + }) + require.NoError(err) + for _, registration := range c.ConsulServices { + if registration.Service.Namespace != "" && registration.Service.Namespace != "default" { + _, _, err = consulClient.Namespaces().Create(&api.Namespace{ + Name: registration.Service.Namespace, + }, nil) + 
require.NoError(err) + } + _, err = consulClient.Catalog().Register(®istration, nil) + require.NoError(err) + } + + client := NamespacesNodeServicesClient{ + Client: consulClient, + } + svcs, _, err := client.NodeServices("k8s", ConsulSyncNodeName, api.QueryOptions{ + Namespace: "*", + }) + require.NoError(err) + require.Len(svcs, len(c.Exp)) + for _, expSvc := range c.Exp { + require.Contains(svcs, expSvc) + } + }) + } +} diff --git a/catalog/to-consul/consul_node_services_client_test.go b/catalog/to-consul/consul_node_services_client_test.go new file mode 100644 index 0000000000..58ba67b70f --- /dev/null +++ b/catalog/to-consul/consul_node_services_client_test.go @@ -0,0 +1,184 @@ +package catalog + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" +) + +func TestPreNamespacesNodeServicesClient_NodeServices(t *testing.T) { + t.Parallel() + cases := map[string]struct { + ConsulServices []api.CatalogRegistration + Exp []ConsulService + }{ + "no services": { + ConsulServices: nil, + Exp: nil, + }, + "no services on k8s node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + }, + }, + }, + Exp: nil, + }, + "service with k8s tag on different node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without any tags": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: nil, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: 
&api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"not-k8s", "foo"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node with k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "", + Name: "svc", + }, + }, + }, + "multiple services": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc1-id", + Service: "svc1", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "", + Name: "svc1", + }, + { + Namespace: "", + Name: "svc2", + }, + }, + }, + "multiple service instances": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "", + Name: "svc", + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + svr, err := testutil.NewTestServerT(tt) + require.NoError(err) + defer svr.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: svr.HTTPAddr, + }) + require.NoError(err) + for _, registration := range c.ConsulServices { + _, err = consulClient.Catalog().Register(®istration, nil) + require.NoError(err) + } + + client := PreNamespacesNodeServicesClient{ + Client: consulClient, + } + svcs, _, err := client.NodeServices("k8s", ConsulSyncNodeName, 
api.QueryOptions{}) + require.NoError(err) + require.Len(svcs, len(c.Exp)) + for _, expSvc := range c.Exp { + require.Contains(svcs, expSvc) + } + }) + } +} diff --git a/catalog/to-consul/resource.go b/catalog/to-consul/resource.go index 3ffc29b5e4..921b130ab7 100644 --- a/catalog/to-consul/resource.go +++ b/catalog/to-consul/resource.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + "github.com/deckarep/golang-set" "github.com/hashicorp/consul-k8s/helper/controller" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" @@ -46,10 +47,21 @@ const ( // ServiceResource implements controller.Resource to sync Service resource // types from K8S. type ServiceResource struct { - Log hclog.Logger - Client kubernetes.Interface - Syncer Syncer - Namespace string // K8S namespace to watch + Log hclog.Logger + Client kubernetes.Interface + Syncer Syncer + + // AllowK8sNamespacesSet is a set of k8s namespaces to explicitly allow for + // syncing. It supports the special character `*` which indicates that + // all k8s namespaces are eligible unless explicitly denied. This filter + // is applied before checking pod annotations. + AllowK8sNamespacesSet mapset.Set + + // DenyK8sNamespacesSet is a set of k8s namespaces to explicitly deny + // syncing and thus service registration with Consul. An empty set + // means that no namespaces are removed from consideration. This filter + // takes precedence over AllowK8sNamespacesSet. + DenyK8sNamespacesSet mapset.Set // ConsulK8STag is the tag value for services registered. ConsulK8STag string @@ -77,6 +89,28 @@ type ServiceResource struct { // as 'foo-default'. AddK8SNamespaceSuffix bool + // EnableNamespaces indicates that a user is running Consul Enterprise + // with version 1.7+ which is namespace aware. It enables Consul namespaces, + // with syncing into either a single Consul namespace or mirrored from + // k8s namespaces. 
+ EnableNamespaces bool + + // ConsulDestinationNamespace is the name of the Consul namespace to register all + // synced services into if Consul namespaces are enabled and mirroring + // is disabled. This will not be used if mirroring is enabled. + ConsulDestinationNamespace string + + // EnableK8SNSMirroring causes Consul namespaces to be created to match the + // organization within k8s. Services are registered into the Consul + // namespace that mirrors their k8s namespace. + EnableK8SNSMirroring bool + + // K8SNSMirroringPrefix is an optional prefix that can be added to the Consul + // namespaces created while mirroring. For example, if it is set to "k8s-", + // then the k8s `default` namespace will be mirrored in Consul's + // `k8s-default` namespace. + K8SNSMirroringPrefix string + // serviceLock must be held for any read/write to these maps. serviceLock sync.RWMutex @@ -96,14 +130,16 @@ type ServiceResource struct { // Informer implements the controller.Resource interface. func (t *ServiceResource) Informer() cache.SharedIndexInformer { + // Watch all k8s namespaces. Events will be filtered out as appropriate + // based on the allow and deny lists in the `shouldSync` function. 
return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return t.Client.CoreV1().Services(t.namespace()).List(options) + return t.Client.CoreV1().Services(metav1.NamespaceAll).List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return t.Client.CoreV1().Services(t.namespace()).Watch(options) + return t.Client.CoreV1().Services(metav1.NamespaceAll).Watch(options) }, }, &apiv1.Service{}, @@ -134,13 +170,14 @@ func (t *ServiceResource) Upsert(key string, raw interface{}) error { t.Log.Info("service should no longer be synced", "service", key) t.doDelete(key) } else { - t.Log.Debug("syncing disabled for service, ignoring", "key", key) + t.Log.Debug("[ServiceResource.Upsert] syncing disabled for service, ignoring", "key", key) } return nil } // Syncing is enabled, let's keep track of this service. t.serviceMap[key] = service + t.Log.Debug("[ServiceResource.Upsert] adding service to serviceMap", "key", key, "service", service) // If we care about endpoints, we should do the initial endpoints load. if t.shouldTrackEndpoints(key) { @@ -156,6 +193,7 @@ func (t *ServiceResource) Upsert(key string, raw interface{}) error { t.endpointsMap = make(map[string]*apiv1.Endpoints) } t.endpointsMap[key] = endpoints + t.Log.Debug("[ServiceResource.Upsert] adding service's endpoints to endpointsMap", "key", key, "service", service, "endpoints", endpoints) } } @@ -180,7 +218,9 @@ func (t *ServiceResource) Delete(key string) error { // Precondition: assumes t.serviceLock is held func (t *ServiceResource) doDelete(key string) { delete(t.serviceMap, key) + t.Log.Debug("[doDelete] deleting service from serviceMap", "key", key) delete(t.endpointsMap, key) + t.Log.Debug("[doDelete] deleting endpoints from endpointsMap", "key", key) // If there were registrations related to this service, then // delete them and sync. 
if _, ok := t.consulMap[key]; ok { @@ -200,17 +240,22 @@ func (t *ServiceResource) Run(ch <-chan struct{}) { // shouldSync returns true if resyncing should be enabled for the given service. func (t *ServiceResource) shouldSync(svc *apiv1.Service) bool { - // If we're listening on all namespaces, we explicitly ignore the - // system namespace. The user can explicitly enable this by starting - // a sync for that namespace. - if t.namespace() == metav1.NamespaceAll && svc.Namespace == metav1.NamespaceSystem { - t.Log.Debug("ignoring system service since we're listening on all namespaces", - "service-name", t.addPrefixAndK8SNamespace(svc.Name, svc.Namespace)) + // Namespace logic + // If in deny list, don't sync + if t.DenyK8sNamespacesSet.Contains(svc.Namespace) { + t.Log.Debug("[shouldSync] service is in the deny list", "svc.Namespace", svc.Namespace, "service", svc) + return false + } + + // If not in allow list or allow list is not *, don't sync + if !t.AllowK8sNamespacesSet.Contains("*") && !t.AllowK8sNamespacesSet.Contains(svc.Namespace) { + t.Log.Debug("[shouldSync] service not in allow list", "svc.Namespace", svc.Namespace, "service", svc) return false } // Ignore ClusterIP services if ClusterIP sync is disabled if svc.Spec.Type == apiv1.ServiceTypeClusterIP && !t.ClusterIPSync { + t.Log.Debug("[shouldSync] ignoring clusterip service", "svc.Namespace", svc.Namespace, "service", svc) return false } @@ -265,6 +310,8 @@ func (t *ServiceResource) generateRegistrations(key string) { return } + t.Log.Debug("[generateRegistrations] generating registration", "key", key) + // Initialize our consul service map here if it isn't already. if t.consulMap == nil { t.consulMap = make(map[string][]*consulapi.CatalogRegistration) @@ -279,7 +326,7 @@ func (t *ServiceResource) generateRegistrations(key string) { // shallow copied for each instance. 
baseNode := consulapi.CatalogRegistration{ SkipNodeUpdate: true, - Node: "k8s-sync", + Node: ConsulSyncNodeName, Address: "127.0.0.1", NodeMeta: map[string]string{ ConsulSourceKey: ConsulSourceValue, @@ -291,7 +338,7 @@ func (t *ServiceResource) generateRegistrations(key string) { Tags: []string{t.ConsulK8STag}, Meta: map[string]string{ ConsulSourceKey: ConsulSourceValue, - ConsulK8SNS: t.namespace(), + ConsulK8SNS: svc.Namespace, }, } @@ -300,6 +347,22 @@ func (t *ServiceResource) generateRegistrations(key string) { baseService.Service = strings.TrimSpace(v) } + // Update the Consul namespace based on namespace settings + if t.EnableNamespaces { + var ns string + + // Mirroring takes precedence + if t.EnableK8SNSMirroring { + ns = fmt.Sprintf("%s%s", t.K8SNSMirroringPrefix, svc.Namespace) + } else { + ns = t.ConsulDestinationNamespace + } + t.Log.Debug("[generateRegistrations] namespace being used", "key", key, "namespace", ns) + + // Update baseService to have a Consul namespace + baseService.Namespace = ns + } + // Determine the default port and set port annotations var overridePortName string var overridePortNumber int @@ -382,6 +445,7 @@ func (t *ServiceResource) generateRegistrations(key string) { t.Log.Debug("generated registration", "key", key, "service", baseService.Service, + "namespace", baseService.Namespace, "instances", len(t.consulMap[key])) }() @@ -580,15 +644,6 @@ func (t *ServiceResource) sync() { t.Syncer.Sync(rs) } -// namespace returns the K8S namespace to setup the resource watchers in. -func (t *ServiceResource) namespace() string { - if t.Namespace != "" { - return t.Namespace - } - - return metav1.NamespaceAll -} - // serviceEndpointsResource implements controller.Resource and starts // a background watcher on endpoints that is used by the ServiceResource // to keep track of changing endpoints for registered services. 
@@ -597,17 +652,21 @@ type serviceEndpointsResource struct { } func (t *serviceEndpointsResource) Informer() cache.SharedIndexInformer { + // Watch all k8s namespaces. Events will be filtered out as appropriate in the + // `shouldTrackEndpoints` function which checks whether the service is marked + // to be tracked by the `shouldSync` function which uses the allow and deny + // namespace lists. return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return t.Service.Client.CoreV1(). - Endpoints(t.Service.namespace()). + Endpoints(metav1.NamespaceAll). List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return t.Service.Client.CoreV1(). - Endpoints(t.Service.namespace()). + Endpoints(metav1.NamespaceAll). Watch(options) }, }, diff --git a/catalog/to-consul/resource_test.go b/catalog/to-consul/resource_test.go index cabf23bb5a..1d86f844e1 100644 --- a/catalog/to-consul/resource_test.go +++ b/catalog/to-consul/resource_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/deckarep/golang-set" "github.com/hashicorp/consul-k8s/helper/controller" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" @@ -11,6 +12,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) @@ -21,28 +23,21 @@ func init() { hclog.DefaultOptions.Level = hclog.Debug } -func TestServiceResource_impl(t *testing.T) { - var _ controller.Resource = &ServiceResource{} - var _ controller.Backgrounder = &ServiceResource{} -} - // Test that deleting a service properly deletes the registration. 
func TestServiceResource_createDelete(t *testing.T) { t.Parallel() require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("foo", "1.2.3.4")) + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Delete @@ -62,17 +57,15 @@ func TestServiceResource_defaultEnable(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("foo", "1.2.3.4")) + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) time.Sleep(200 * time.Millisecond) @@ -89,17 +82,14 @@ func TestServiceResource_defaultEnableDisable(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc 
:= lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServiceSync] = "false" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -118,18 +108,15 @@ func TestServiceResource_defaultDisable(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ExplicitEnable = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ExplicitEnable: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) time.Sleep(200 * time.Millisecond) @@ -147,18 +134,15 @@ func TestServiceResource_defaultDisableEnable(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ExplicitEnable = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ExplicitEnable: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServiceSync] = "t" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -171,51 +155,20 @@ func TestServiceResource_defaultDisableEnable(t *testing.T) { require.Len(actual, 1) } -// Test that system resources are not synced by default. 
-func TestServiceResource_system(t *testing.T) { - t.Parallel() - require := require.New(t) - client := fake.NewSimpleClientset() - syncer := &TestSyncer{} - - // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) - defer closer() - - // Insert an LB service - svc := lbService("foo", "1.2.3.4") - _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(svc) - require.NoError(err) - time.Sleep(200 * time.Millisecond) - - // Verify what we got - syncer.Lock() - defer syncer.Unlock() - actual := syncer.Registrations - require.Len(actual, 0) -} - // Test changing the sync tag to false deletes the service. func TestServiceResource_changeSyncToFalse(t *testing.T) { t.Parallel() client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ExplicitEnable = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ExplicitEnable: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service with the sync=true - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServiceSync] = "true" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(t, err) @@ -248,18 +201,15 @@ func TestServiceResource_addK8SNamespace(t *testing.T) { t.Parallel() client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.AddK8SNamespaceSuffix = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - AddK8SNamespaceSuffix: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert 
an LB service with the sync=true - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", "namespace", "1.2.3.4") _, err := client.CoreV1().Services("namespace").Create(svc) require.NoError(t, err) @@ -269,7 +219,7 @@ func TestServiceResource_addK8SNamespace(t *testing.T) { defer syncer.Unlock() actual := syncer.Registrations require.Len(r, actual, 1) - require.Equal(t, actual[0].Service.Service, "foo-namespace") + require.Equal(r, actual[0].Service.Service, "foo-namespace") }) } @@ -279,19 +229,16 @@ func TestServiceResource_addK8SNamespaceWithPrefix(t *testing.T) { t.Parallel() client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.AddK8SNamespaceSuffix = true + serviceResource.ConsulServicePrefix = "prefix" // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - AddK8SNamespaceSuffix: true, - ConsulServicePrefix: "prefix", - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service with the sync=true - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", "namespace", "1.2.3.4") _, err := client.CoreV1().Services("namespace").Create(svc) require.NoError(t, err) @@ -301,7 +248,7 @@ func TestServiceResource_addK8SNamespaceWithPrefix(t *testing.T) { defer syncer.Unlock() actual := syncer.Registrations require.Len(r, actual, 1) - require.Equal(t, actual[0].Service.Service, "prefixfoo-namespace") + require.Equal(r, actual[0].Service.Service, "prefixfoo-namespace") }) } @@ -311,18 +258,15 @@ func TestServiceResource_addK8SNamespaceWithNameAnnotation(t *testing.T) { t.Parallel() client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.AddK8SNamespaceSuffix = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: 
hclog.Default(), - Client: client, - Syncer: syncer, - AddK8SNamespaceSuffix: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service with the sync=true - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", "bar", "1.2.3.4") svc.Annotations[annotationServiceName] = "different-service-name" _, err := client.CoreV1().Services("bar").Create(svc) require.NoError(t, err) @@ -333,7 +277,7 @@ func TestServiceResource_addK8SNamespaceWithNameAnnotation(t *testing.T) { defer syncer.Unlock() actual := syncer.Registrations require.Len(r, actual, 1) - require.Equal(t, actual[0].Service.Service, "different-service-name") + require.Equal(r, actual[0].Service.Service, "different-service-name") }) } @@ -343,17 +287,14 @@ func TestServiceResource_externalIP(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Spec.ExternalIPs = []string{"3.3.3.3", "4.4.4.4"} _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -378,18 +319,15 @@ func TestServiceResource_externalIPPrefix(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ConsulServicePrefix = "prefix" // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ConsulServicePrefix: "prefix", - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // 
Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Spec.ExternalIPs = []string{"3.3.3.3", "4.4.4.4"} _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -414,17 +352,15 @@ func TestServiceResource_lb(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("foo", "1.2.3.4")) + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Wait a bit @@ -445,18 +381,16 @@ func TestServiceResource_lbPrefix(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ConsulServicePrefix = "prefix" // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ConsulServicePrefix: "prefix", - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("foo", "1.2.3.4")) + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Wait a bit @@ -478,17 +412,14 @@ func TestServiceResource_lbMultiEndpoint(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := 
defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Status.LoadBalancer.Ingress = append( svc.Status.LoadBalancer.Ingress, apiv1.LoadBalancerIngress{IP: "2.3.4.5"}, @@ -517,17 +448,14 @@ func TestServiceResource_lbAnnotatedName(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServiceName] = "bar" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -547,17 +475,14 @@ func TestServiceResource_lbPort(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Spec.Ports = []apiv1.ServicePort{ {Name: "http", Port: 80, TargetPort: intstr.FromInt(8080)}, {Name: "rpc", Port: 8500, TargetPort: intstr.FromInt(2000)}, @@ -582,17 +507,14 @@ func TestServiceResource_lbAnnotatedPort(t 
*testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServicePort] = "rpc" svc.Spec.Ports = []apiv1.ServicePort{ {Name: "http", Port: 80, TargetPort: intstr.FromInt(8080)}, @@ -618,18 +540,15 @@ func TestServiceResource_lbAnnotatedTags(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ConsulK8STag = TestConsulK8STag // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ConsulK8STag: TestConsulK8STag, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServiceTags] = "one, two,three" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -649,17 +568,14 @@ func TestServiceResource_lbAnnotatedMeta(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert an LB service - svc := lbService("foo", "1.2.3.4") + svc := lbService("foo", 
metav1.NamespaceDefault, "1.2.3.4") svc.Annotations[annotationServiceMetaPrefix+"foo"] = "bar" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -679,14 +595,11 @@ func TestServiceResource_nodePort(t *testing.T) { require := require.New(t) syncer := &TestSyncer{} client := fake.NewSimpleClientset() + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = ExternalOnly // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: ExternalOnly, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() createNodes(t, client) @@ -694,7 +607,8 @@ func TestServiceResource_nodePort(t *testing.T) { createEndpoints(t, client, "foo", metav1.NamespaceDefault) // Insert the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(nodePortService("foo")) + svc := nodePortService("foo", metav1.NamespaceDefault) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) time.Sleep(200 * time.Millisecond) @@ -721,15 +635,12 @@ func TestServiceResource_nodePortPrefix(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = ExternalOnly + serviceResource.ConsulServicePrefix = "prefix" // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: ExternalOnly, - ConsulServicePrefix: "prefix", - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() createNodes(t, client) @@ -737,7 +648,8 @@ func TestServiceResource_nodePortPrefix(t *testing.T) { createEndpoints(t, client, "foo", metav1.NamespaceDefault) // Insert the service - _, err := 
client.CoreV1().Services(metav1.NamespaceDefault).Create(nodePortService("foo")) + svc := nodePortService("foo", metav1.NamespaceDefault) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Wait a bit @@ -766,14 +678,11 @@ func TestServiceResource_nodePort_singleEndpoint(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = ExternalOnly // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: ExternalOnly, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() node1, _ := createNodes(t, client) @@ -799,7 +708,8 @@ func TestServiceResource_nodePort_singleEndpoint(t *testing.T) { require.NoError(err) // Insert the service - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(nodePortService("foo")) + svc := nodePortService("foo", metav1.NamespaceDefault) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Wait a bit @@ -822,14 +732,11 @@ func TestServiceResource_nodePortAnnotatedPort(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = ExternalOnly // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: ExternalOnly, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() createNodes(t, client) @@ -837,7 +744,7 @@ func TestServiceResource_nodePortAnnotatedPort(t *testing.T) { createEndpoints(t, client, "foo", metav1.NamespaceDefault) // Insert the service - svc := nodePortService("foo") + svc := nodePortService("foo", 
metav1.NamespaceDefault) svc.Annotations = map[string]string{annotationServicePort: "rpc"} _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -867,14 +774,11 @@ func TestServiceResource_nodePortUnnamedPort(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = ExternalOnly // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: ExternalOnly, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() createNodes(t, client) @@ -882,7 +786,7 @@ func TestServiceResource_nodePortUnnamedPort(t *testing.T) { createEndpoints(t, client, "foo", metav1.NamespaceDefault) // Insert the service - svc := nodePortService("foo") + svc := nodePortService("foo", metav1.NamespaceDefault) // Override service ports svc.Spec.Ports = []apiv1.ServicePort{ {Port: 80, TargetPort: intstr.FromInt(8080), NodePort: 30000}, @@ -917,14 +821,11 @@ func TestServiceResource_nodePort_internalOnlySync(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = InternalOnly // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: InternalOnly, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() createNodes(t, client) @@ -932,7 +833,8 @@ func TestServiceResource_nodePort_internalOnlySync(t *testing.T) { createEndpoints(t, client, "foo", metav1.NamespaceDefault) // Insert the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(nodePortService("foo")) + svc := nodePortService("foo", metav1.NamespaceDefault) + _, err := 
client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Wait a bit @@ -961,14 +863,11 @@ func TestServiceResource_nodePort_externalFirstSync(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.NodePortSync = ExternalFirst // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - NodePortSync: ExternalFirst, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() node1, _ := createNodes(t, client) @@ -984,7 +883,8 @@ func TestServiceResource_nodePort_externalFirstSync(t *testing.T) { createEndpoints(t, client, "foo", metav1.NamespaceDefault) // Insert the service - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(nodePortService("foo")) + svc := nodePortService("foo", metav1.NamespaceDefault) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Wait a bit @@ -1012,18 +912,16 @@ func TestServiceResource_clusterIP(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(clusterIPService("foo")) + svc := clusterIPService("foo", metav1.NamespaceDefault) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Insert the endpoints @@ -1052,19 +950,17 @@ func TestServiceResource_clusterIPPrefix(t *testing.T) { require := require.New(t) 
client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true + serviceResource.ConsulServicePrefix = "prefix" // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: true, - ConsulServicePrefix: "prefix", - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(clusterIPService("foo")) + svc := clusterIPService("foo", metav1.NamespaceDefault) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Insert the endpoints @@ -1094,18 +990,15 @@ func TestServiceResource_clusterIPAnnotatedPortName(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - svc := clusterIPService("foo") + svc := clusterIPService("foo", metav1.NamespaceDefault) svc.Annotations[annotationServicePort] = "rpc" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -1137,18 +1030,15 @@ func TestServiceResource_clusterIPAnnotatedPortNumber(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: true, - 
}) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - svc := clusterIPService("foo") + svc := clusterIPService("foo", metav1.NamespaceDefault) svc.Annotations[annotationServicePort] = "4141" _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) @@ -1179,18 +1069,15 @@ func TestServiceResource_clusterIPUnnamedPorts(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - svc := clusterIPService("foo") + svc := clusterIPService("foo", metav1.NamespaceDefault) svc.Spec.Ports = []apiv1.ServicePort{ {Port: 80, TargetPort: intstr.FromInt(8080)}, {Port: 8500, TargetPort: intstr.FromInt(2000)}, @@ -1225,18 +1112,16 @@ func TestServiceResource_clusterIPSyncDisabled(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = false // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: false, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(clusterIPService("foo")) + svc := clusterIPService("foo", metav1.NamespaceDefault) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc) require.NoError(err) // Insert the endpoints @@ -1259,19 +1144,16 @@ func TestServiceResource_clusterIPAllNamespaces(t *testing.T) { client 
:= fake.NewSimpleClientset() syncer := &TestSyncer{} testNamespace := "test_namespace" + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - Namespace: apiv1.NamespaceAll, - ClusterIPSync: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - _, err := client.CoreV1().Services(testNamespace).Create(clusterIPService("foo")) + svc := clusterIPService("foo", testNamespace) + _, err := client.CoreV1().Services(testNamespace).Create(svc) require.NoError(err) // Insert the endpoints @@ -1300,18 +1182,15 @@ func TestServiceResource_clusterIPTargetPortNamed(t *testing.T) { require := require.New(t) client := fake.NewSimpleClientset() syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ClusterIPSync = true // Start the controller - closer := controller.TestControllerRun(&ServiceResource{ - Log: hclog.Default(), - Client: client, - Syncer: syncer, - ClusterIPSync: true, - }) + closer := controller.TestControllerRun(&serviceResource) defer closer() // Insert the service - svc := clusterIPService("foo") + svc := clusterIPService("foo", metav1.NamespaceDefault) svc.Annotations[annotationServicePort] = "rpc" svc.Spec.Ports = []apiv1.ServicePort{ {Port: 80, TargetPort: intstr.FromString("httpPort"), Name: "http"}, @@ -1340,11 +1219,199 @@ func TestServiceResource_clusterIPTargetPortNamed(t *testing.T) { require.NotEqual(actual[0].Service.ID, actual[1].Service.ID) } +// Test allow/deny namespace lists. 
+func TestServiceResource_AllowDenyNamespaces(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + AllowList mapset.Set + DenyList mapset.Set + ExpNamespaces []string + }{ + "empty lists": { + AllowList: mapset.NewSet(), + DenyList: mapset.NewSet(), + ExpNamespaces: nil, + }, + "only from allow list": { + AllowList: mapset.NewSet("foo"), + DenyList: mapset.NewSet(), + ExpNamespaces: []string{"foo"}, + }, + "both in allow and deny": { + AllowList: mapset.NewSet("foo"), + DenyList: mapset.NewSet("foo"), + ExpNamespaces: nil, + }, + "deny removes one from allow": { + AllowList: mapset.NewSet("foo", "bar"), + DenyList: mapset.NewSet("foo"), + ExpNamespaces: []string{"bar"}, + }, + "* in allow": { + AllowList: mapset.NewSet("*"), + DenyList: mapset.NewSet(), + ExpNamespaces: []string{"foo", "bar"}, + }, + "* in allow with one denied": { + AllowList: mapset.NewSet("*"), + DenyList: mapset.NewSet("bar"), + ExpNamespaces: []string{"foo"}, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + client := fake.NewSimpleClientset() + syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.AllowK8sNamespacesSet = c.AllowList + serviceResource.DenyK8sNamespacesSet = c.DenyList + + // Start the controller + closer := controller.TestControllerRun(&serviceResource) + defer closer() + + // We always have two services in two namespaces: foo and bar. + // Each service has the same name as its origin k8s namespace which + // we use for asserting that the right namespace got synced. + for _, ns := range []string{"foo", "bar"} { + _, err := client.CoreV1().Services(ns).Create(lbService(ns, ns, "1.2.3.4")) + require.NoError(tt, err) + } + + // Test we got registrations from the expected namespaces. 
+ retry.Run(tt, func(r *retry.R) { + syncer.Lock() + defer syncer.Unlock() + actual := syncer.Registrations + require.Len(r, actual, len(c.ExpNamespaces)) + }) + + syncer.Lock() + defer syncer.Unlock() + for _, expNS := range c.ExpNamespaces { + found := false + for _, reg := range syncer.Registrations { + // The service names are the same as their k8s destination + // namespaces so we can use that to ensure the services were + // synced from the expected namespaces. + if reg.Service.Service == expNS { + found = true + } + } + require.True(tt, found, "did not find service from ns %s", expNS) + } + }) + } +} + +// Test that services are synced to the correct destination ns +// when a single destination namespace is set. +func TestServiceResource_singleDestNamespace(t *testing.T) { + t.Parallel() + consulDestNamespaces := []string{"default", "dest"} + for _, consulDestNamespace := range consulDestNamespaces { + t.Run(consulDestNamespace, func(tt *testing.T) { + client := fake.NewSimpleClientset() + syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.ConsulDestinationNamespace = consulDestNamespace + serviceResource.EnableNamespaces = true + closer := controller.TestControllerRun(&serviceResource) + defer closer() + _, err := client.CoreV1().Services(metav1.NamespaceDefault). + Create(lbService("foo", metav1.NamespaceDefault, "1.2.3.4")) + require.NoError(tt, err) + + retry.Run(tt, func(r *retry.R) { + syncer.Lock() + defer syncer.Unlock() + actual := syncer.Registrations + require.Len(r, actual, 1) + require.Equal(r, consulDestNamespace, actual[0].Service.Namespace) + }) + }) + } +} + +// Test that services are created in a mirrored namespace. 
+func TestServiceResource_MirroredNamespace(t *testing.T) { + t.Parallel() + client := fake.NewSimpleClientset() + syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.EnableK8SNSMirroring = true + serviceResource.EnableNamespaces = true + closer := controller.TestControllerRun(&serviceResource) + defer closer() + + k8sNamespaces := []string{"foo", "bar", "default"} + for _, ns := range k8sNamespaces { + _, err := client.CoreV1().Services(ns). + Create(lbService(ns, ns, "1.2.3.4")) + require.NoError(t, err) + } + + retry.Run(t, func(r *retry.R) { + syncer.Lock() + defer syncer.Unlock() + actual := syncer.Registrations + require.Len(r, actual, 3) + for _, expNS := range k8sNamespaces { + found := false + for _, reg := range actual { + if reg.Service.Namespace == expNS { + found = true + } + } + require.True(r, found, "did not find registration from ns %s", expNS) + } + }) +} + +// Test that services are created in a mirrored namespace with prefix. +func TestServiceResource_MirroredPrefixNamespace(t *testing.T) { + t.Parallel() + client := fake.NewSimpleClientset() + syncer := &TestSyncer{} + serviceResource := defaultServiceResource(client, syncer) + serviceResource.EnableK8SNSMirroring = true + serviceResource.EnableNamespaces = true + serviceResource.K8SNSMirroringPrefix = "prefix-" + closer := controller.TestControllerRun(&serviceResource) + defer closer() + + k8sNamespaces := []string{"foo", "bar", "default"} + for _, ns := range k8sNamespaces { + _, err := client.CoreV1().Services(ns). 
+ Create(lbService(ns, ns, "1.2.3.4")) + require.NoError(t, err) + } + + retry.Run(t, func(r *retry.R) { + syncer.Lock() + defer syncer.Unlock() + actual := syncer.Registrations + require.Len(r, actual, 3) + for _, expNS := range k8sNamespaces { + found := false + for _, reg := range actual { + if reg.Service.Namespace == "prefix-"+expNS { + found = true + } + } + require.True(r, found, "did not find registration from ns %s", expNS) + } + }) +} + // lbService returns a Kubernetes service of type LoadBalancer. -func lbService(name, lbIP string) *apiv1.Service { +func lbService(name, namespace, lbIP string) *apiv1.Service { return &apiv1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, + Namespace: namespace, Annotations: map[string]string{}, }, @@ -1365,10 +1432,11 @@ func lbService(name, lbIP string) *apiv1.Service { } // nodePortService returns a Kubernetes service of type NodePort. -func nodePortService(name string) *apiv1.Service { +func nodePortService(name, namespace string) *apiv1.Service { return &apiv1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: name, + Namespace: namespace, }, Spec: apiv1.ServiceSpec{ @@ -1382,10 +1450,11 @@ func nodePortService(name string) *apiv1.Service { } // clusterIPService returns a Kubernetes service of type ClusterIP. 
-func clusterIPService(name string) *apiv1.Service { +func clusterIPService(name, namespace string) *apiv1.Service { return &apiv1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, + Namespace: namespace, Annotations: map[string]string{}, }, @@ -1441,7 +1510,8 @@ func createEndpoints(t *testing.T, client *fake.Clientset, serviceName string, n node2 := nodeName2 _, err := client.CoreV1().Endpoints(namespace).Create(&apiv1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, + Name: serviceName, + Namespace: namespace, }, Subsets: []apiv1.EndpointSubset{ @@ -1469,3 +1539,13 @@ func createEndpoints(t *testing.T, client *fake.Clientset, serviceName string, n require.NoError(t, err) } + +func defaultServiceResource(client kubernetes.Interface, syncer Syncer) ServiceResource { + return ServiceResource{ + Log: hclog.Default(), + Client: client, + Syncer: syncer, + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + } +} diff --git a/catalog/to-consul/syncer.go b/catalog/to-consul/syncer.go index 3e2e4cef39..971e51d29c 100644 --- a/catalog/to-consul/syncer.go +++ b/catalog/to-consul/syncer.go @@ -6,6 +6,7 @@ import ( "time" "github.com/cenkalti/backoff" + "github.com/deckarep/golang-set" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" ) @@ -18,6 +19,10 @@ const ( // ConsulServicePollPeriod is how often a service is checked for // whether it has instances to reap. ConsulServicePollPeriod = 60 * time.Second + + // ConsulSyncNodeName is the name of the node in Consul that we register + // services on. It's not a real node backed by a Consul agent. + ConsulSyncNodeName = "k8s-sync" ) // Syncer is responsible for syncing a set of Consul catalog registrations. @@ -37,14 +42,16 @@ type ConsulSyncer struct { Client *api.Client Log hclog.Logger - // Namespace is the namespace to run this syncer for. This is used - // primarily to limit the reaping of the syncer: the syncer will only - // reap services/nodes that 1.) 
have no NS key set or 2.) have an NS - // key set that is equal to this. - // - // If this is blank, any NS key is allowed. This should only be blank - // if a single syncer is running for the entire cluster. - Namespace string + // EnableNamespaces indicates that a user is running Consul Enterprise + // with version 1.7+ which is namespace aware. It enables Consul namespaces, + // with syncing into either a single Consul namespace or mirrored from + // k8s namespaces. + EnableNamespaces bool + + // CrossNamespaceACLPolicy is the name of the ACL policy to attach to + // any created Consul namespaces to allow cross namespace service discovery. + // Only necessary if ACLs are enabled. + CrossNamespaceACLPolicy string // SyncPeriod is the interval between full catalog syncs. These will // re-register all services to prevent overwrites of data. This should @@ -62,18 +69,25 @@ type ConsulSyncer struct { // ConsulK8STag is the tag value for services registered. ConsulK8STag string - lock sync.Mutex - once sync.Once - services map[string]struct{} // set of valid service names - nodes map[string]*consulSyncState - deregs map[string]*api.CatalogDeregistration - watchers map[string]context.CancelFunc -} + // ConsulNodeServicesClient is used to list services for a node. We use a + // separate client for this API call that handles older version of Consul. + ConsulNodeServicesClient ConsulNodeServicesClient -// consulSyncState keeps track of the state of syncing nodes/services. 
-type consulSyncState struct { - // Services keeps track of the valid services on this node (by service ID) - Services map[string]*api.CatalogRegistration + lock sync.Mutex + once sync.Once + + // serviceNames is all namespaces mapped to a set of valid + // Consul service names + serviceNames map[string]mapset.Set + + // namespaces is all namespaces mapped to a map of Consul service + // ids mapped to their CatalogRegistrations + namespaces map[string]map[string]*api.CatalogRegistration + deregs map[string]*api.CatalogDeregistration + + // watchers is all namespaces mapped to a map of Consul service + // names mapped to a cancel function for watcher routines + watchers map[string]map[string]context.CancelFunc } // Sync implements Syncer @@ -82,24 +96,28 @@ func (s *ConsulSyncer) Sync(rs []*api.CatalogRegistration) { s.lock.Lock() defer s.lock.Unlock() - s.services = make(map[string]struct{}) - s.nodes = make(map[string]*consulSyncState) - for _, r := range rs { - // Mark this as a valid service - s.services[r.Service.Service] = struct{}{} - - // Initialize the state if we don't have it - state, ok := s.nodes[r.Node] - if !ok { - state = &consulSyncState{ - Services: make(map[string]*api.CatalogRegistration), - } + s.serviceNames = make(map[string]mapset.Set) + s.namespaces = make(map[string]map[string]*api.CatalogRegistration) - s.nodes[r.Node] = state + for _, r := range rs { + // Determine the namespace the service is in to use for indexing + // against the s.serviceNames and s.namespaces maps. + // This will be "" for OSS. 
+ ns := r.Service.Namespace + + // Mark this as a valid service, initializing state if necessary + if _, ok := s.serviceNames[ns]; !ok { + s.serviceNames[ns] = mapset.NewSet() } + s.serviceNames[ns].Add(r.Service.Service) + s.Log.Debug("[Sync] adding service to serviceNames set", "service", r.Service, "service name", r.Service.Service) - // Add our registration - state.Services[r.Service.ID] = r + // Add service to namespaces map, initializing if necessary + if _, ok := s.namespaces[ns]; !ok { + s.namespaces[ns] = make(map[string]*api.CatalogRegistration) + } + s.namespaces[ns][r.Service.ID] = r + s.Log.Debug("[Sync] adding service to namespaces map", "service", r.Service) } } @@ -133,28 +151,34 @@ func (s *ConsulSyncer) Run(ctx context.Context) { // This task only marks them for deletion but doesn't perform the actual // deletion. func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { - opts := api.QueryOptions{ + opts := &api.QueryOptions{ AllowStale: true, WaitIndex: 1, WaitTime: 1 * time.Minute, } + if s.EnableNamespaces { + opts.Namespace = "*" + } + // minWait is the minimum time to wait between scheduling service deletes. // This prevents a lot of churn in services causing high CPU usage. minWait := s.SyncPeriod / 4 minWaitCh := time.After(0) for { - // Get all services with tags. 
- var serviceMap map[string][]string + var services []ConsulService var meta *api.QueryMeta err := backoff.Retry(func() error { var err error - serviceMap, meta, err = s.Client.Catalog().Services(&opts) + services, meta, err = s.ConsulNodeServicesClient.NodeServices(s.ConsulK8STag, ConsulSyncNodeName, *opts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) + if err != nil { s.Log.Warn("error querying services, will retry", "err", err) - continue + } else { + s.Log.Debug("[watchReapableServices] services returned from catalog", + "services", services) } // Wait our minimum time before continuing or retrying @@ -172,30 +196,31 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { // Update our blocking index opts.WaitIndex = meta.LastIndex - // Lock so we can modify the + // Lock so we can modify the stored state s.lock.Lock() - // Go through the service map and find services that should be reaped - for name, tags := range serviceMap { - for _, tag := range tags { - if tag == s.ConsulK8STag { - // We only care if we don't know about this service at all. - if _, ok := s.services[name]; ok { - continue - } - - s.Log.Info("invalid service found, scheduling for delete", - "service-name", name) - if err := s.scheduleReapServiceLocked(name); err != nil { - s.Log.Info("error querying service for delete", - "service-name", name, - "err", err) - } - - // We're done searching this service, let it go - break + // Go through the service array and find services that should be reaped + for _, service := range services { + // Check that the namespace exists in the valid service names map + // before checking whether it contains the service + if _, ok := s.serviceNames[service.Namespace]; ok { + // We only care if we don't know about this service at all. 
+ if s.serviceNames[service.Namespace].Contains(service.Name) { + s.Log.Debug("[watchReapableServices] serviceNames contains service", + "namespace", service.Namespace, + "service-name", service.Name) + continue } } + + s.Log.Info("invalid service found, scheduling for delete", + "service-name", service.Name, "service-consul-namespace", service.Namespace) + if err := s.scheduleReapServiceLocked(service.Name, service.Namespace); err != nil { + s.Log.Info("error querying service for delete", + "service-name", service.Name, + "service-consul-namespace", service.Namespace, + "err", err) + } } s.lock.Unlock() @@ -204,9 +229,9 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { // watchService watches all instances of a service by name for changes // and schedules re-registration or deletion if necessary. -func (s *ConsulSyncer) watchService(ctx context.Context, name string) { - s.Log.Info("starting service watcher", "service-name", name) - defer s.Log.Info("stopping service watcher", "service-name", name) +func (s *ConsulSyncer) watchService(ctx context.Context, name, namespace string) { + s.Log.Info("starting service watcher", "service-name", name, "service-consul-namespace", namespace) + defer s.Log.Info("stopping service watcher", "service-name", name, "service-consul-namespace", namespace) for { select { @@ -218,18 +243,26 @@ func (s *ConsulSyncer) watchService(ctx context.Context, name string) { case <-time.After(s.SyncPeriod): } + // Set up query options + queryOpts := &api.QueryOptions{ + AllowStale: true, + } + if s.EnableNamespaces { + // Sets the Consul namespace to query the catalog + queryOpts.Namespace = namespace + } + // Wait for service changes var services []*api.CatalogService err := backoff.Retry(func() error { var err error - services, _, err = s.Client.Catalog().Service(name, s.ConsulK8STag, &api.QueryOptions{ - AllowStale: true, - }) + services, _, err = s.Client.Catalog().Service(name, s.ConsulK8STag, queryOpts) return err }, 
backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) if err != nil { s.Log.Warn("error querying service, will retry", "service-name", name, + "service-namespace", namespace, // will be "" if namespaces aren't enabled "err", err) continue } @@ -238,28 +271,26 @@ func (s *ConsulSyncer) watchService(ctx context.Context, name string) { s.lock.Lock() for _, svc := range services { - // If we have a namespace set and the key exactly matches this - // namespace, then we skip it. - if s.Namespace != "" && - len(svc.ServiceMeta) > 0 && - svc.ServiceMeta[ConsulK8SNS] != "" && - svc.ServiceMeta[ConsulK8SNS] != s.Namespace { - continue + // Make sure the namespace exists before we run checks against it + if _, ok := s.serviceNames[namespace]; ok { + // If the service is valid and its info isn't nil, we don't deregister it + if s.serviceNames[namespace].Contains(svc.ServiceName) && s.namespaces[namespace][svc.ServiceID] != nil { + continue + } } - // We delete unless we have a service and the node mapping - delete := true - if _, ok := s.services[svc.ServiceName]; ok { - nodeSvc := s.nodes[svc.Node] - delete = nodeSvc == nil || nodeSvc.Services[svc.ServiceID] == nil + s.deregs[svc.ServiceID] = &api.CatalogDeregistration{ + Node: svc.Node, + ServiceID: svc.ServiceID, } - - if delete { - s.deregs[svc.ServiceID] = &api.CatalogDeregistration{ - Node: svc.Node, - ServiceID: svc.ServiceID, - } + if s.EnableNamespaces { + s.deregs[svc.ServiceID].Namespace = namespace } + s.Log.Debug("[watchService] service being scheduled for deregistration", + "namespace", namespace, + "service name", svc.ServiceName, + "service id", svc.ServiceID, + "service dereg", s.deregs[svc.ServiceID]) } s.lock.Unlock() @@ -270,28 +301,33 @@ func (s *ConsulSyncer) watchService(ctx context.Context, name string) { // name that have the k8s tag and schedules them for removal. 
// // Precondition: lock must be held -func (s *ConsulSyncer) scheduleReapServiceLocked(name string) error { - services, _, err := s.Client.Catalog().Service(name, s.ConsulK8STag, &api.QueryOptions{ - AllowStale: true, - }) +func (s *ConsulSyncer) scheduleReapServiceLocked(name, namespace string) error { + // Set up query options + opts := api.QueryOptions{AllowStale: true} + if s.EnableNamespaces { + opts.Namespace = namespace + } + + // Only consider services that are tagged from k8s + services, _, err := s.Client.Catalog().Service(name, s.ConsulK8STag, &opts) if err != nil { return err } + // Create deregistrations for all of these for _, svc := range services { - // If we have a namespace set and the key exactly matches this - // namespace, then we skip it. - if s.Namespace != "" && - len(svc.ServiceMeta) > 0 && - svc.ServiceMeta[ConsulK8SNS] != "" && - svc.ServiceMeta[ConsulK8SNS] != s.Namespace { - continue - } - s.deregs[svc.ServiceID] = &api.CatalogDeregistration{ Node: svc.Node, ServiceID: svc.ServiceID, } + if s.EnableNamespaces { + s.deregs[svc.ServiceID].Namespace = namespace + } + s.Log.Debug("[scheduleReapServiceLocked] service being scheduled for deregistration", + "namespace", namespace, + "service name", svc.ServiceName, + "service id", svc.ServiceID, + "service dereg", s.deregs[svc.ServiceID]) } return nil @@ -306,18 +342,35 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { s.Log.Info("registering services") - // Start the service watchers - for k, cf := range s.watchers { - if _, ok := s.services[k]; !ok { - cf() - delete(s.watchers, k) + // Update the service watchers + for ns, watchers := range s.watchers { + // If the service the watcher is watching is no longer valid, + // cancel the watcher + for svc, cf := range watchers { + if s.serviceNames[ns] == nil || !s.serviceNames[ns].Contains(svc) { + cf() + delete(s.watchers[ns], svc) + s.Log.Debug("[syncFull] deleting service watcher", "namespace", ns, "service", svc) + } } } - for k := 
range s.services { - if _, ok := s.watchers[k]; !ok { - svcCtx, cancelF := context.WithCancel(ctx) - go s.watchService(svcCtx, k) - s.watchers[k] = cancelF + + // Start watchers for all services if they're not already running + for ns, services := range s.serviceNames { + for svc := range services.Iter() { + if _, ok := s.watchers[ns][svc.(string)]; !ok { + svcCtx, cancelF := context.WithCancel(ctx) + go s.watchService(svcCtx, svc.(string), ns) + s.Log.Debug("[syncFull] starting watchService routine", "namespace", ns, "service", svc) + + // Create watcher map if it doesn't exist for this namespace + if s.watchers[ns] == nil { + s.watchers[ns] = make(map[string]context.CancelFunc) + } + + // Add the watcher to our tracking + s.watchers[ns][svc.(string)] = cancelF + } } } @@ -325,12 +378,14 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { for _, r := range s.deregs { s.Log.Info("deregistering service", "node-name", r.Node, - "service-id", r.ServiceID) + "service-id", r.ServiceID, + "service-consul-namespace", r.Namespace) _, err := s.Client.Catalog().Deregister(r, nil) if err != nil { s.Log.Warn("error deregistering service", "node-name", r.Node, "service-id", r.ServiceID, + "service-consul-namespace", r.Namespace, "err", err) } } @@ -340,36 +395,56 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { // Register all the services. This will overwrite any changes that // may have been made to the registered services. 
- for _, state := range s.nodes { - for _, r := range state.Services { + for _, services := range s.namespaces { + for _, r := range services { + if s.EnableNamespaces { + // Check and potentially create the service's namespace if + // it doesn't already exist + err := s.checkAndCreateNamespace(r.Service.Namespace) + if err != nil { + s.Log.Warn("error checking and creating Consul namespace", + "node-name", r.Node, + "service-name", r.Service.Service, + "consul-namespace-name", r.Service.Namespace, + "err", err) + continue + } + } + + // Register the service _, err := s.Client.Catalog().Register(r, nil) if err != nil { s.Log.Warn("error registering service", "node-name", r.Node, "service-name", r.Service.Service, + "service", r.Service, "err", err) continue } s.Log.Debug("registered service instance", "node-name", r.Node, - "service-name", r.Service.Service) + "service-name", r.Service.Service, + "consul-namespace-name", r.Service.Namespace, + "service", r.Service) } } } func (s *ConsulSyncer) init() { - if s.services == nil { - s.services = make(map[string]struct{}) + s.lock.Lock() + defer s.lock.Unlock() + if s.serviceNames == nil { + s.serviceNames = make(map[string]mapset.Set) } - if s.nodes == nil { - s.nodes = make(map[string]*consulSyncState) + if s.namespaces == nil { + s.namespaces = make(map[string]map[string]*api.CatalogRegistration) } if s.deregs == nil { s.deregs = make(map[string]*api.CatalogDeregistration) } if s.watchers == nil { - s.watchers = make(map[string]context.CancelFunc) + s.watchers = make(map[string]map[string]context.CancelFunc) } if s.SyncPeriod == 0 { s.SyncPeriod = ConsulSyncPeriod @@ -378,3 +453,40 @@ func (s *ConsulSyncer) init() { s.ServicePollPeriod = ConsulServicePollPeriod } } + +func (s *ConsulSyncer) checkAndCreateNamespace(ns string) error { + // Check if the Consul namespace exists + namespaceInfo, _, err := s.Client.Namespaces().Read(ns, nil) + if err != nil { + return err + } + + // If not, create it + if namespaceInfo == 
nil { + var aclConfig api.NamespaceACLConfig + if s.CrossNamespaceACLPolicy != "" { + // Create the ACLs config for the cross-Consul-namespace + // default policy that needs to be attached + aclConfig = api.NamespaceACLConfig{ + PolicyDefaults: []api.ACLLink{ + {Name: s.CrossNamespaceACLPolicy}, + }, + } + } + + consulNamespace := api.Namespace{ + Name: ns, + Description: "Auto-generated by a Catalog Sync Process", + ACLs: &aclConfig, + Meta: map[string]string{"external-source": "kubernetes"}, + } + + _, _, err = s.Client.Namespaces().Create(&consulNamespace, nil) + if err != nil { + return err + } + s.Log.Info("creating consul namespace", "name", consulNamespace.Name) + } + + return nil +} diff --git a/catalog/to-consul/syncer_ent_test.go b/catalog/to-consul/syncer_ent_test.go new file mode 100644 index 0000000000..c81fea35ab --- /dev/null +++ b/catalog/to-consul/syncer_ent_test.go @@ -0,0 +1,178 @@ +// +build enterprise + +package catalog + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" +) + +// Test that the syncer registers services in Consul namespaces. +func TestConsulSyncer_ConsulNamespaces(t *testing.T) { + t.Parallel() + a, err := testutil.NewTestServerT(t) + require.NoError(t, err) + defer a.Stop() + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { + s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } + }) + defer closer() + + // We expect services to be created in the default and foo namespaces. + namespaces := []string{"default", "foo"} + var registrations []*api.CatalogRegistration + for _, ns := range namespaces { + registrations = append(registrations, + // The services will be named the same as their namespaces. 
+ testRegistrationNS(ConsulSyncNodeName, ns, ns, ns)) + } + s.Sync(registrations) + + retry.Run(t, func(r *retry.R) { + for _, ns := range namespaces { + svcInstances, _, err := client.Catalog().Service(ns, "k8s", &api.QueryOptions{ + Namespace: ns, + }) + require.NoError(r, err) + require.Len(r, svcInstances, 1) + instance := svcInstances[0] + require.Equal(r, ConsulSyncNodeName, instance.Node) + require.Equal(r, "127.0.0.1", instance.Address) + require.Equal(r, map[string]string{ConsulSourceKey: "k8s"}, instance.NodeMeta) + require.Equal(r, map[string]string{ + ConsulSourceKey: "k8s", + ConsulK8SNS: ns, + }, instance.ServiceMeta) + } + }) +} + +// Test the syncer reaps services that weren't registered by us +// across all Consul namespaces. +func TestConsulSyncer_ReapConsulNamespace(t *testing.T) { + t.Parallel() + a, err := testutil.NewTestServerT(t) + require.NoError(t, err) + defer a.Stop() + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { + s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } + }) + defer closer() + + // We expect services to be created in the default and foo namespaces. + s.Sync([]*api.CatalogRegistration{ + testRegistrationNS(ConsulSyncNodeName, "default", "default", "default"), + testRegistrationNS(ConsulSyncNodeName, "foo", "foo", "foo"), + }) + + // We create services we expect to be deleted in the bar and baz namespaces. 
+ expEmptiedNamespaces := []string{"bar", "baz"} + for _, ns := range expEmptiedNamespaces { + svc := testRegistrationNS(ConsulSyncNodeName, ns, ns, ns) + _, _, err := client.Namespaces().Create(&api.Namespace{ + Name: ns, + }, nil) + require.NoError(t, err) + _, err = client.Catalog().Register(svc, &api.WriteOptions{ + Namespace: ns, + }) + require.NoError(t, err) + } + + retry.Run(t, func(r *retry.R) { + // Invalid services should be deleted. + for _, ns := range expEmptiedNamespaces { + svcs, _, err := client.Catalog().Services(&api.QueryOptions{ + Namespace: ns, + }) + require.NoError(r, err) + require.Len(r, svcs, 0) + } + + // The services in the foo and default namespaces should still exist. + for _, ns := range []string{"default", "foo"} { + svcs, _, err := client.Catalog().Services(&api.QueryOptions{ + Namespace: ns, + }) + require.NoError(r, err) + // The default namespace should have the consul service registered + // so its count should be 2. + if ns == "default" { + require.Len(r, svcs, 2) + } else { + require.Len(r, svcs, 1) + } + } + }) +} + +// Test that the syncer reaps individual invalid service instances when +// namespaces are enabled. +func TestConsulSyncer_reapServiceInstanceNamespacesEnabled(t *testing.T) { + t.Parallel() + a, err := testutil.NewTestServerT(t) + require.NoError(t, err) + defer a.Stop() + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { + s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } + }) + defer closer() + + // We'll create one service in the foo namespace. It should only have one + // instance. + s.Sync([]*api.CatalogRegistration{ + testRegistrationNS(ConsulSyncNodeName, "foo", "foo", "foo"), + }) + + // Create an invalid instance service directly in Consul. 
+ _, _, err = client.Namespaces().Create(&api.Namespace{ + Name: "foo", + }, nil) + require.NoError(t, err) + svc := testRegistrationNS(ConsulSyncNodeName, "foo", "foo", "foo") + svc.Service.ID = serviceID("k8s-sync", "foo2") + _, err = client.Catalog().Register(svc, nil) + require.NoError(t, err) + + // Test that the invalid instance is reaped. + retry.Run(t, func(r *retry.R) { + services, _, err := client.Catalog().Service("foo", "", &api.QueryOptions{ + Namespace: "foo", + }) + require.NoError(r, err) + require.Len(r, services, 1) + require.Equal(r, "foo", services[0].ServiceName) + }) +} + +func testRegistrationNS(node, service, k8sSrcNS, consulDestNS string) *api.CatalogRegistration { + r := testRegistration(node, service, k8sSrcNS) + r.Service.Namespace = consulDestNS + return r +} diff --git a/catalog/to-consul/syncer_test.go b/catalog/to-consul/syncer_test.go index a563653de1..88581c467b 100644 --- a/catalog/to-consul/syncer_test.go +++ b/catalog/to-consul/syncer_test.go @@ -2,11 +2,14 @@ package catalog import ( "context" + "net/http" + "net/http/httptest" "testing" "time" "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/go-hclog" @@ -22,12 +25,12 @@ func TestConsulSyncer_register(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") client := a.Client() - s, closer := testConsulSyncer(t, client) + s, closer := testConsulSyncer(client) defer closer() // Sync s.Sync([]*api.CatalogRegistration{ - testRegistration("foo", "bar"), + testRegistration(ConsulSyncNodeName, "bar", "default"), }) // Read the service back out @@ -44,64 +47,12 @@ func TestConsulSyncer_register(t *testing.T) { }) // Verify the settings - require.Equal("foo", service.Node) + require.Equal("k8s-sync", service.Node) require.Equal("bar", service.ServiceName) require.Equal("127.0.0.1", service.Address) } -// Test 
that the syncer reaps invalid services -func TestConsulSyncer_reapService(t *testing.T) { - t.Parallel() - require := require.New(t) - - a := agent.NewTestAgent(t, t.Name(), ``) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - client := a.Client() - - s, closer := testConsulSyncer(t, client) - defer closer() - - // Sync - s.Sync([]*api.CatalogRegistration{ - testRegistration("foo", "bar"), - }) - - // Create an invalid service directly in Consul - _, err := client.Catalog().Register(testRegistration("foo", "baz"), nil) - require.NoError(err) - - // Reaped service should not exist - retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("baz", "", nil) - if err != nil { - r.Fatalf("err: %s", err) - } - if len(services) > 0 { - r.Fatal("service still exists") - } - }) - - // Valid service should exist - var service *api.CatalogService - retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("bar", "", nil) - if err != nil { - r.Fatalf("err: %s", err) - } - if len(services) == 0 { - r.Fatal("service not found") - } - service = services[0] - }) - - // Verify the settings - require.Equal("foo", service.Node) - require.Equal("bar", service.ServiceName) - require.Equal("127.0.0.1", service.Address) -} - -// Test that the syncer reaps invalid services by instance +// Test that the syncer reaps individual invalid service instances. 
func TestConsulSyncer_reapServiceInstance(t *testing.T) { t.Parallel() require := require.New(t) @@ -111,12 +62,12 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") client := a.Client() - s, closer := testConsulSyncer(t, client) + s, closer := testConsulSyncer(client) defer closer() // Sync s.Sync([]*api.CatalogRegistration{ - testRegistration("foo", "bar"), + testRegistration(ConsulSyncNodeName, "bar", "default"), }) // Wait for the first service @@ -131,8 +82,8 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { }) // Create an invalid service directly in Consul - svc := testRegistration("foo", "bar") - svc.Service.ID = serviceID("foo", "bar2") + svc := testRegistration(ConsulSyncNodeName, "bar", "default") + svc.Service.ID = serviceID("k8s-sync", "bar2") _, err := client.Catalog().Register(svc, nil) require.NoError(err) @@ -150,100 +101,97 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { }) // Verify the settings - require.Equal(serviceID("foo", "bar"), service.ServiceID) - require.Equal("foo", service.Node) + require.Equal(serviceID("k8s-sync", "bar"), service.ServiceID) + require.Equal("k8s-sync", service.Node) require.Equal("bar", service.ServiceName) require.Equal("127.0.0.1", service.Address) } -// Test that the syncer does not reap services in another NS. -func TestConsulSyncer_reapServiceOtherNamespace(t *testing.T) { +// Test that the syncer reaps services not registered by us that are tagged +// with k8s. 
+func TestConsulSyncer_reapService(t *testing.T) { t.Parallel() - require := require.New(t) - - a := agent.NewTestAgent(t, t.Name(), ``) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - client := a.Client() - - s, closer := testConsulSyncer(t, client) - defer closer() - // Sync - s.Sync([]*api.CatalogRegistration{ - testRegistration("foo", "bar"), - }) - - // Create an invalid service directly in Consul - svc := testRegistration("foo", "baz") - svc.Service.Meta[ConsulK8SNS] = "other" - _, err := client.Catalog().Register(svc, nil) - require.NoError(err) - - // Sleep for a bit - time.Sleep(500 * time.Millisecond) - - // Valid service should exist - services, _, err := client.Catalog().Service("baz", "", nil) - require.NoError(err) - require.Len(services, 1) + sourceK8sNamespaceAnnotations := []string{"", "other", "default"} + for _, k8sNS := range sourceK8sNamespaceAnnotations { + t.Run(k8sNS, func(tt *testing.T) { + a, err := testutil.NewTestServerT(tt) + require.NoError(tt, err) + defer a.Stop() + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(tt, err) + s, closer := testConsulSyncer(client) + defer closer() + + s.Sync([]*api.CatalogRegistration{ + testRegistration(ConsulSyncNodeName, "bar", "default"), + }) + + // Create a service directly in Consul. Since it was created directly we + // expect it to be deleted. + svc := testRegistration(ConsulSyncNodeName, "baz", "default") + svc.Service.Meta[ConsulK8SNS] = k8sNS + _, err = client.Catalog().Register(svc, nil) + require.NoError(tt, err) + + retry.Run(tt, func(r *retry.R) { + // Invalid service should be deleted. + bazInstances, _, err := client.Catalog().Service("baz", "", nil) + require.NoError(r, err) + require.Len(r, bazInstances, 0) + + // Valid service should still be registered. 
+ barInstances, _, err := client.Catalog().Service("bar", "", nil) + require.NoError(r, err) + require.Len(r, barInstances, 1) + service := barInstances[0] + require.Equal(r, ConsulSyncNodeName, service.Node) + require.Equal(r, "bar", service.ServiceName) + require.Equal(r, "127.0.0.1", service.Address) + }) + }) + } } -// Test that the syncer reaps services with no NS set. -func TestConsulSyncer_reapServiceSameNamespace(t *testing.T) { +// Test that when the syncer is stopped, we don't continue to call the Consul +// API. This test was added as a regression test after a bug was discovered +// that after the context was cancelled, we would continue to make API calls +// to the Consul API in a tight loop. +func TestConsulSyncer_stopsGracefully(t *testing.T) { t.Parallel() - require := require.New(t) - - a := agent.NewTestAgent(t, t.Name(), ``) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - client := a.Client() - - s, closer := testConsulSyncer(t, client) - defer closer() - - // Sync - s.Sync([]*api.CatalogRegistration{ - testRegistration("foo", "bar"), - }) - - // Create an invalid service directly in Consul - svc := testRegistration("foo", "baz") - svc.Service.Meta[ConsulK8SNS] = "" - _, err := client.Catalog().Register(svc, nil) - require.NoError(err) - // Reaped service should not exist - retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("baz", "", nil) - if err != nil { - r.Fatalf("err: %s", err) - } - if len(services) > 0 { - r.Fatal("service still exists") - } + // We use a test http server here so we can count the number of calls. + callCount := 0 + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callCount++ + // We need to respond with errors to trigger the bug. If we don't + // then the code path is only encountered after a timeout which we + // won't trigger in the test. + w.WriteHeader(500) + })) + defer consulServer.Close() + + // Start the syncer. 
+ client, err := api.NewClient(&api.Config{ + Address: consulServer.URL, }) - - // Valid service should exist - var service *api.CatalogService - retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("bar", "", nil) - if err != nil { - r.Fatalf("err: %s", err) - } - if len(services) == 0 { - r.Fatal("service not found") - } - service = services[0] + require.NoError(t, err) + s, closer := testConsulSyncer(client) + s.Sync([]*api.CatalogRegistration{ + testRegistration("k8s-sync", "bar", "default"), }) - // Verify the settings - require.Equal("foo", service.Node) - require.Equal("bar", service.ServiceName) - require.Equal("127.0.0.1", service.Address) + // Compare the call count before and after stopping the server. + beforeStopAPICount := callCount + closer() + time.Sleep(100 * time.Millisecond) + // Before the bugfix, the count would be >100. + require.LessOrEqual(t, callCount-beforeStopAPICount, 2) } -func testRegistration(node, service string) *api.CatalogRegistration { +func testRegistration(node, service, k8sSrcNamespace string) *api.CatalogRegistration { return &api.CatalogRegistration{ Node: node, Address: "127.0.0.1", @@ -255,21 +203,31 @@ func testRegistration(node, service string) *api.CatalogRegistration { Tags: []string{TestConsulK8STag}, Meta: map[string]string{ ConsulSourceKey: TestConsulK8STag, - ConsulK8SNS: "default", + ConsulK8SNS: k8sSrcNamespace, }, }, } } -func testConsulSyncer(t *testing.T, client *api.Client) (*ConsulSyncer, func()) { +func testConsulSyncer(client *api.Client) (*ConsulSyncer, func()) { + return testConsulSyncerWithConfig(client, func(syncer *ConsulSyncer) {}) +} + +// testConsulSyncerWithConfig starts a consul syncer that can be configured +// prior to starting via the configurator method. 
+func testConsulSyncerWithConfig(client *api.Client, configurator func(*ConsulSyncer)) (*ConsulSyncer, func()) { s := &ConsulSyncer{ Client: client, Log: hclog.Default(), SyncPeriod: 200 * time.Millisecond, ServicePollPeriod: 50 * time.Millisecond, - Namespace: "default", ConsulK8STag: TestConsulK8STag, + ConsulNodeServicesClient: &PreNamespacesNodeServicesClient{ + Client: client, + }, } + configurator(s) + s.init() ctx, cancelF := context.WithCancel(context.Background()) doneCh := make(chan struct{}) diff --git a/connect-inject/container_init.go b/connect-inject/container_init.go index 8ae79a06e2..f995da0813 100644 --- a/connect-inject/container_init.go +++ b/connect-inject/container_init.go @@ -21,9 +21,14 @@ type initContainerCommandData struct { // WriteServiceDefaults controls whether a service-defaults config is // written for this service. WriteServiceDefaults bool - Upstreams []initContainerCommandUpstreamData - Tags string - Meta map[string]string + // ConsulNamespace is the Consul namespace to register the service + // and proxy in. An empty string indicates namespaces are not + // enabled in Consul (necessary for OSS). + ConsulNamespace string + NamespaceMirroringEnabled bool + Upstreams []initContainerCommandUpstreamData + Tags string + Meta map[string]string // The PEM-encoded CA certificate to use when // communicating with Consul clients @@ -31,15 +36,16 @@ type initContainerCommandData struct { } type initContainerCommandUpstreamData struct { - Name string - LocalPort int32 - Datacenter string - Query string + Name string + LocalPort int32 + ConsulUpstreamNamespace string + Datacenter string + Query string } // containerInit returns the init container spec for registering the Consul // service, setting up the Envoy bootstrap, etc. 
-func (h *Handler) containerInit(pod *corev1.Pod) (corev1.Container, error) { +func (h *Handler) containerInit(pod *corev1.Pod, k8sNamespace string) (corev1.Container, error) { protocol := h.DefaultProtocol if annoProtocol, ok := pod.Annotations[annotationProtocol]; ok { protocol = annoProtocol @@ -50,13 +56,16 @@ func (h *Handler) containerInit(pod *corev1.Pod) (corev1.Container, error) { // would then override any global proxy-defaults config. Now, we only // write the config if a protocol is explicitly set. writeServiceDefaults := h.WriteServiceDefaults && protocol != "" + data := initContainerCommandData{ - ServiceName: pod.Annotations[annotationService], - ProxyServiceName: fmt.Sprintf("%s-sidecar-proxy", pod.Annotations[annotationService]), - ServiceProtocol: protocol, - AuthMethod: h.AuthMethod, - WriteServiceDefaults: writeServiceDefaults, - ConsulCACert: h.ConsulCACert, + ServiceName: pod.Annotations[annotationService], + ProxyServiceName: fmt.Sprintf("%s-sidecar-proxy", pod.Annotations[annotationService]), + ServiceProtocol: protocol, + AuthMethod: h.AuthMethod, + WriteServiceDefaults: writeServiceDefaults, + ConsulNamespace: h.consulNamespace(k8sNamespace), + NamespaceMirroringEnabled: h.EnableK8SNSMirroring, + ConsulCACert: h.ConsulCACert, } if data.ServiceName == "" { // Assertion, since we call defaultAnnotations above and do @@ -86,7 +95,7 @@ func (h *Handler) containerInit(pod *corev1.Pod) (corev1.Container, error) { // this in an HCL config file and HCL arrays are json formatted. 
jsonTags, err := json.Marshal(tags) if err != nil { - h.Log.Error("Error json marshaling tags", "Error", err, "Tags", tags) + h.Log.Error("Error json marshaling tags", "err", err, "Tags", tags) } else { data.Tags = string(jsonTags) } @@ -105,14 +114,25 @@ func (h *Handler) containerInit(pod *corev1.Pod) (corev1.Container, error) { for _, raw := range strings.Split(raw, ",") { parts := strings.SplitN(raw, ":", 3) - var datacenter, service_name, prepared_query string + var datacenter, service_name, prepared_query, namespace string var port int32 if parts[0] == "prepared_query" { port, _ = portValue(pod, strings.TrimSpace(parts[2])) prepared_query = strings.TrimSpace(parts[1]) } else { port, _ = portValue(pod, strings.TrimSpace(parts[1])) - service_name = strings.TrimSpace(parts[0]) + + // Parse the namespace if provided + if data.ConsulNamespace != "" { + pieces := strings.SplitN(parts[0], ".", 2) + service_name = pieces[0] + + if len(pieces) > 1 { + namespace = pieces[1] + } + } else { + service_name = strings.TrimSpace(parts[0]) + } // parse the optional datacenter if len(parts) > 2 { @@ -121,12 +141,19 @@ func (h *Handler) containerInit(pod *corev1.Pod) (corev1.Container, error) { } if port > 0 { - data.Upstreams = append(data.Upstreams, initContainerCommandUpstreamData{ + upstream := initContainerCommandUpstreamData{ Name: service_name, LocalPort: port, Datacenter: datacenter, Query: prepared_query, - }) + } + + // Add namespace to upstream + if namespace != "" { + upstream.ConsulUpstreamNamespace = namespace + } + + data.Upstreams = append(data.Upstreams, upstream) } } } @@ -225,6 +252,9 @@ services { kind = "connect-proxy" address = "${POD_IP}" port = 20000 + {{- if .ConsulNamespace }} + namespace = "{{ .ConsulNamespace }}" + {{- end }} {{- if .Tags}} tags = {{.Tags}} {{- end}} @@ -253,6 +283,9 @@ services { destination_type = "prepared_query" destination_name = "{{ .Query}}" {{- end}} + {{- if .ConsulUpstreamNamespace }} + destination_namespace = "{{ 
.ConsulUpstreamNamespace }}" + {{- end}} local_bind_port = {{ .LocalPort }} {{- if .Datacenter }} datacenter = "{{ .Datacenter }}" @@ -279,6 +312,9 @@ services { name = "{{ .ServiceName }}" address = "${POD_IP}" port = {{ .ServicePort }} + {{- if .ConsulNamespace }} + namespace = "{{ .ConsulNamespace }}" + {{- end }} {{- if .Tags}} tags = {{.Tags}} {{- end}} @@ -298,17 +334,31 @@ cat </consul/connect-inject/service-defaults.hcl kind = "service-defaults" name = "{{ .ServiceName }}" protocol = "{{ .ServiceProtocol }}" +{{- if .ConsulNamespace }} +namespace = "{{ .ConsulNamespace }}" +{{- end }} EOF {{- end }} + {{- if .AuthMethod }} /bin/consul login -method="{{ .AuthMethod }}" \ -bearer-token-file="/var/run/secrets/kubernetes.io/serviceaccount/token" \ -token-sink-file="/consul/connect-inject/acl-token" \ + {{- if.ConsulNamespace }} + {{- if .NamespaceMirroringEnabled }} + {{- /* If namespace mirroring is enabled, the auth method is + defined in the default namespace */}} + -namespace="default" \ + {{- else }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} + {{- end }} -meta="pod=${POD_NAMESPACE}/${POD_NAME}" {{- /* The acl token file needs to be read by the lifecycle-sidecar which runs as non-root user consul-k8s. 
*/}} chmod 444 /consul/connect-inject/acl-token {{- end }} + {{- if .WriteServiceDefaults }} {{- /* We use -cas and -modify-index 0 so that if a service-defaults config already exists for this service, we don't override it */}} @@ -316,6 +366,9 @@ chmod 444 /consul/connect-inject/acl-token {{- if .AuthMethod }} -token-file="/consul/connect-inject/acl-token" \ {{- end }} + {{- if .ConsulNamespace }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} /consul/connect-inject/service-defaults.hcl || true {{- end }} @@ -323,6 +376,9 @@ chmod 444 /consul/connect-inject/acl-token {{- if .AuthMethod }} -token-file="/consul/connect-inject/acl-token" \ {{- end }} + {{- if .ConsulNamespace }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} /consul/connect-inject/service.hcl # Generate the envoy bootstrap code @@ -331,6 +387,9 @@ chmod 444 /consul/connect-inject/acl-token {{- if .AuthMethod }} -token-file="/consul/connect-inject/acl-token" \ {{- end }} + {{- if .ConsulNamespace }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml # Copy the Consul binary diff --git a/connect-inject/container_init_test.go b/connect-inject/container_init_test.go index 34fceb2a72..685a50e889 100644 --- a/connect-inject/container_init_test.go +++ b/connect-inject/container_init_test.go @@ -10,6 +10,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const k8sNamespace = "k8snamespace" + func TestHandlerContainerInit(t *testing.T) { minimal := func() *corev1.Pod { return &corev1.Pod{ @@ -191,6 +193,7 @@ services { "", `datacenter`, }, + { "Upstream prepared query", func(pod *corev1.Pod) *corev1.Pod { @@ -483,7 +486,574 @@ services { require := require.New(t) var h Handler - container, err := h.containerInit(tt.Pod(minimal())) + container, err := h.containerInit(tt.Pod(minimal()), k8sNamespace) + require.NoError(err) + actual := strings.Join(container.Command, " ") + require.Contains(actual, tt.Cmd) + if tt.CmdNot != "" { + 
require.NotContains(actual, tt.CmdNot) + } + }) + } +} + +func TestHandlerContainerInit_namespacesEnabled(t *testing.T) { + minimal := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + { + Name: "auth-method-secret", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "service-account-secret", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + }, + }, + }, + }, + } + } + + cases := []struct { + Name string + Pod func(*corev1.Pod) *corev1.Pod + Handler Handler + K8sNamespace string + Cmd string // Strings.Contains test + CmdNot string // Not contains + }{ + { + "Only service, whole template, default namespace", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + Handler{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + }, + k8sNamespace, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" + +# Register the service. The HCL is stored in the volume so that +# the preStop hook can access it to deregister the service. 
+cat </consul/connect-inject/service.hcl +services { + id = "${PROXY_SERVICE_ID}" + name = "web-sidecar-proxy" + kind = "connect-proxy" + address = "${POD_IP}" + port = 20000 + namespace = "default" + + proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + } + + checks { + name = "Proxy Public Listener" + tcp = "${POD_IP}:20000" + interval = "10s" + deregister_critical_service_after = "10m" + } + + checks { + name = "Destination Alias" + alias_service = "web" + } +} + +services { + id = "${SERVICE_ID}" + name = "web" + address = "${POD_IP}" + port = 0 + namespace = "default" +} +EOF + +/bin/consul services register \ + -namespace="default" \ + /consul/connect-inject/service.hcl + +# Generate the envoy bootstrap code +/bin/consul connect envoy \ + -proxy-id="${PROXY_SERVICE_ID}" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Copy the Consul binary +cp /bin/consul /consul/connect-inject/consul`, + "", + }, + + { + "Only service, whole template, non-default namespace", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + Handler{ + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + }, + k8sNamespace, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" + +# Register the service. The HCL is stored in the volume so that +# the preStop hook can access it to deregister the service. 
+cat </consul/connect-inject/service.hcl +services { + id = "${PROXY_SERVICE_ID}" + name = "web-sidecar-proxy" + kind = "connect-proxy" + address = "${POD_IP}" + port = 20000 + namespace = "non-default" + + proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + } + + checks { + name = "Proxy Public Listener" + tcp = "${POD_IP}:20000" + interval = "10s" + deregister_critical_service_after = "10m" + } + + checks { + name = "Destination Alias" + alias_service = "web" + } +} + +services { + id = "${SERVICE_ID}" + name = "web" + address = "${POD_IP}" + port = 0 + namespace = "non-default" +} +EOF + +/bin/consul services register \ + -namespace="non-default" \ + /consul/connect-inject/service.hcl + +# Generate the envoy bootstrap code +/bin/consul connect envoy \ + -proxy-id="${PROXY_SERVICE_ID}" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Copy the Consul binary +cp /bin/consul /consul/connect-inject/consul`, + "", + }, + + { + "Whole template, auth method, non-default namespace, mirroring disabled", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + Handler{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + }, + k8sNamespace, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" + +# Register the service. The HCL is stored in the volume so that +# the preStop hook can access it to deregister the service. 
+cat </consul/connect-inject/service.hcl +services { + id = "${PROXY_SERVICE_ID}" + name = "web-sidecar-proxy" + kind = "connect-proxy" + address = "${POD_IP}" + port = 20000 + namespace = "non-default" + + proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + } + + checks { + name = "Proxy Public Listener" + tcp = "${POD_IP}:20000" + interval = "10s" + deregister_critical_service_after = "10m" + } + + checks { + name = "Destination Alias" + alias_service = "web" + } +} + +services { + id = "${SERVICE_ID}" + name = "web" + address = "${POD_IP}" + port = 0 + namespace = "non-default" +} +EOF +/bin/consul login -method="auth-method" \ + -bearer-token-file="/var/run/secrets/kubernetes.io/serviceaccount/token" \ + -token-sink-file="/consul/connect-inject/acl-token" \ + -namespace="non-default" \ + -meta="pod=${POD_NAMESPACE}/${POD_NAME}" +chmod 444 /consul/connect-inject/acl-token + +/bin/consul services register \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="non-default" \ + /consul/connect-inject/service.hcl + +# Generate the envoy bootstrap code +/bin/consul connect envoy \ + -proxy-id="${PROXY_SERVICE_ID}" \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Copy the Consul binary +cp /bin/consul /consul/connect-inject/consul`, + "", + }, + + { + "Whole template, auth method, non-default namespace, mirroring enabled", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + Handler{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", // Overridden by mirroring + EnableK8SNSMirroring: true, + }, + k8sNamespace, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" + +# Register the service. 
The HCL is stored in the volume so that +# the preStop hook can access it to deregister the service. +cat </consul/connect-inject/service.hcl +services { + id = "${PROXY_SERVICE_ID}" + name = "web-sidecar-proxy" + kind = "connect-proxy" + address = "${POD_IP}" + port = 20000 + namespace = "k8snamespace" + + proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + } + + checks { + name = "Proxy Public Listener" + tcp = "${POD_IP}:20000" + interval = "10s" + deregister_critical_service_after = "10m" + } + + checks { + name = "Destination Alias" + alias_service = "web" + } +} + +services { + id = "${SERVICE_ID}" + name = "web" + address = "${POD_IP}" + port = 0 + namespace = "k8snamespace" +} +EOF +/bin/consul login -method="auth-method" \ + -bearer-token-file="/var/run/secrets/kubernetes.io/serviceaccount/token" \ + -token-sink-file="/consul/connect-inject/acl-token" \ + -namespace="default" \ + -meta="pod=${POD_NAMESPACE}/${POD_NAME}" +chmod 444 /consul/connect-inject/acl-token + +/bin/consul services register \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="k8snamespace" \ + /consul/connect-inject/service.hcl + +# Generate the envoy bootstrap code +/bin/consul connect envoy \ + -proxy-id="${PROXY_SERVICE_ID}" \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="k8snamespace" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Copy the Consul binary +cp /bin/consul /consul/connect-inject/consul`, + "", + }, + + { + "Whole template, service defaults and no auth method, non-default namespace", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + Handler{ + WriteServiceDefaults: true, + DefaultProtocol: "http", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + }, + k8sNamespace, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" + +# Register the service. 
The HCL is stored in the volume so that +# the preStop hook can access it to deregister the service. +cat </consul/connect-inject/service.hcl +services { + id = "${PROXY_SERVICE_ID}" + name = "web-sidecar-proxy" + kind = "connect-proxy" + address = "${POD_IP}" + port = 20000 + namespace = "non-default" + + proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + } + + checks { + name = "Proxy Public Listener" + tcp = "${POD_IP}:20000" + interval = "10s" + deregister_critical_service_after = "10m" + } + + checks { + name = "Destination Alias" + alias_service = "web" + } +} + +services { + id = "${SERVICE_ID}" + name = "web" + address = "${POD_IP}" + port = 0 + namespace = "non-default" +} +EOF +# Create the service-defaults config for the service +cat </consul/connect-inject/service-defaults.hcl +kind = "service-defaults" +name = "web" +protocol = "http" +namespace = "non-default" +EOF +/bin/consul config write -cas -modify-index 0 \ + -namespace="non-default" \ + /consul/connect-inject/service-defaults.hcl || true + +/bin/consul services register \ + -namespace="non-default" \ + /consul/connect-inject/service.hcl + +# Generate the envoy bootstrap code +/bin/consul connect envoy \ + -proxy-id="${PROXY_SERVICE_ID}" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Copy the Consul binary +cp /bin/consul /consul/connect-inject/consul`, + "", + }, + + { + "Whole template, service defaults and auth method, non-default namespace, mirroring enabled", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + Handler{ + AuthMethod: "auth-method", + WriteServiceDefaults: true, + DefaultProtocol: "http", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", // Overridden by mirroring + EnableK8SNSMirroring: true, + }, + k8sNamespace, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" + +# 
Register the service. The HCL is stored in the volume so that +# the preStop hook can access it to deregister the service. +cat </consul/connect-inject/service.hcl +services { + id = "${PROXY_SERVICE_ID}" + name = "web-sidecar-proxy" + kind = "connect-proxy" + address = "${POD_IP}" + port = 20000 + namespace = "k8snamespace" + + proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + } + + checks { + name = "Proxy Public Listener" + tcp = "${POD_IP}:20000" + interval = "10s" + deregister_critical_service_after = "10m" + } + + checks { + name = "Destination Alias" + alias_service = "web" + } +} + +services { + id = "${SERVICE_ID}" + name = "web" + address = "${POD_IP}" + port = 0 + namespace = "k8snamespace" +} +EOF +# Create the service-defaults config for the service +cat </consul/connect-inject/service-defaults.hcl +kind = "service-defaults" +name = "web" +protocol = "http" +namespace = "k8snamespace" +EOF +/bin/consul login -method="auth-method" \ + -bearer-token-file="/var/run/secrets/kubernetes.io/serviceaccount/token" \ + -token-sink-file="/consul/connect-inject/acl-token" \ + -namespace="default" \ + -meta="pod=${POD_NAMESPACE}/${POD_NAME}" +chmod 444 /consul/connect-inject/acl-token +/bin/consul config write -cas -modify-index 0 \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="k8snamespace" \ + /consul/connect-inject/service-defaults.hcl || true + +/bin/consul services register \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="k8snamespace" \ + /consul/connect-inject/service.hcl + +# Generate the envoy bootstrap code +/bin/consul connect envoy \ + -proxy-id="${PROXY_SERVICE_ID}" \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="k8snamespace" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Copy the Consul binary +cp /bin/consul /consul/connect-inject/consul`, + "", + }, + + { + "Upstream namespace", + func(pod *corev1.Pod) *corev1.Pod { + 
pod.Annotations[annotationService] = "web" + pod.Annotations[annotationUpstreams] = "db.namespace:1234" + return pod + }, + Handler{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + }, + k8sNamespace, + `proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + upstreams { + destination_type = "service" + destination_name = "db" + destination_namespace = "namespace" + local_bind_port = 1234 + } + }`, + "", + }, + + { + "Upstream no namespace", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationUpstreams] = "db:1234" + return pod + }, + Handler{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + }, + k8sNamespace, + `proxy { + destination_service_name = "web" + destination_service_id = "${SERVICE_ID}" + upstreams { + destination_type = "service" + destination_name = "db" + local_bind_port = 1234 + } + }`, + "", + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + h := tt.Handler + container, err := h.containerInit(tt.Pod(minimal()), k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.Contains(actual, tt.Cmd) @@ -516,7 +1086,7 @@ func TestHandlerContainerInit_writeServiceDefaultsDefaultProtocol(t *testing.T) }, }, } - container, err := h.containerInit(pod) + container, err := h.containerInit(pod, k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.Contains(actual, ` @@ -564,7 +1134,7 @@ func TestHandlerContainerInit_writeServiceDefaultsPodProtocol(t *testing.T) { }, }, } - container, err := h.containerInit(pod) + container, err := h.containerInit(pod, k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.Contains(actual, ` @@ -616,7 +1186,7 @@ func TestHandlerContainerInit_authMethod(t *testing.T) { }, }, } - container, err := h.containerInit(pod) + container, err := 
h.containerInit(pod, k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.Contains(actual, ` @@ -666,7 +1236,7 @@ func TestHandlerContainerInit_authMethodAndCentralConfig(t *testing.T) { }, }, } - container, err := h.containerInit(pod) + container, err := h.containerInit(pod, k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.Contains(actual, ` @@ -720,7 +1290,7 @@ func TestHandlerContainerInit_noDefaultProtocol(t *testing.T) { }, }, } - container, err := h.containerInit(pod) + container, err := h.containerInit(pod, k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.NotContains(actual, ` @@ -759,7 +1329,7 @@ func TestHandlerContainerInit_WithTLS(t *testing.T) { }, }, } - container, err := h.containerInit(pod) + container, err := h.containerInit(pod, k8sNamespace) require.NoError(err) actual := strings.Join(container.Command, " ") require.Contains(actual, ` diff --git a/connect-inject/envoy_sidecar.go b/connect-inject/envoy_sidecar.go index 1df63e2ae3..74d1c2ec86 100644 --- a/connect-inject/envoy_sidecar.go +++ b/connect-inject/envoy_sidecar.go @@ -8,13 +8,22 @@ import ( corev1 "k8s.io/api/core/v1" ) -func (h *Handler) envoySidecar(pod *corev1.Pod) (corev1.Container, error) { +type sidecarContainerCommandData struct { + AuthMethod string + ConsulNamespace string +} + +func (h *Handler) envoySidecar(pod *corev1.Pod, k8sNamespace string) (corev1.Container, error) { + templateData := sidecarContainerCommandData{ + AuthMethod: h.AuthMethod, + ConsulNamespace: h.consulNamespace(k8sNamespace), + } // Render the command var buf bytes.Buffer tpl := template.Must(template.New("root").Parse(strings.TrimSpace( sidecarPreStopCommandTpl))) - err := tpl.Execute(&buf, h.AuthMethod) + err := tpl.Execute(&buf, &templateData) if err != nil { return corev1.Container{}, err } @@ -74,11 +83,15 @@ func (h *Handler) envoySidecar(pod *corev1.Pod) (corev1.Container, error) { 
const sidecarPreStopCommandTpl = ` /consul/connect-inject/consul services deregister \ - {{- if . }} + {{- if .AuthMethod }} -token-file="/consul/connect-inject/acl-token" \ {{- end }} + {{- if .ConsulNamespace }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} /consul/connect-inject/service.hcl -{{- if . }} + +{{- if .AuthMethod }} && /consul/connect-inject/consul logout \ -token-file="/consul/connect-inject/acl-token" {{- end}} diff --git a/connect-inject/envoy_sidecar_test.go b/connect-inject/envoy_sidecar_test.go index ec7a1e865a..e16147302d 100644 --- a/connect-inject/envoy_sidecar_test.go +++ b/connect-inject/envoy_sidecar_test.go @@ -27,7 +27,7 @@ func TestHandlerEnvoySidecar(t *testing.T) { }, }, } - container, err := h.envoySidecar(pod) + container, err := h.envoySidecar(pod, k8sNamespace) require.NoError(err) require.Equal(container.Command, []string{ "envoy", @@ -82,7 +82,7 @@ func TestHandlerEnvoySidecar_AuthMethod(t *testing.T) { }, }, } - container, err := h.envoySidecar(pod) + container, err := h.envoySidecar(pod, k8sNamespace) require.NoError(err) preStopCommand := strings.Join(container.Lifecycle.PreStop.Exec.Command, " ") @@ -116,7 +116,7 @@ func TestHandlerEnvoySidecar_WithTLS(t *testing.T) { }, }, } - container, err := h.envoySidecar(pod) + container, err := h.envoySidecar(pod, k8sNamespace) require.NoError(err) require.Equal(container.Env, []corev1.EnvVar{ { @@ -135,3 +135,71 @@ func TestHandlerEnvoySidecar_WithTLS(t *testing.T) { }, }) } + +// Test that the pre-stop command is modified when namespaces +// are enabled. 
A single test is enough here, since the exclusion +// cases are tested in the other cases above and there are numerous +// tests specifically for `h.consulNamespace` in handler_test.go +func TestHandlerEnvoySidecar_Namespaces(t *testing.T) { + require := require.New(t) + h := Handler{ + EnableNamespaces: true, + ConsulDestinationNamespace: k8sNamespace, + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := h.envoySidecar(pod, k8sNamespace) + require.NoError(err) + + preStopCommand := strings.Join(container.Lifecycle.PreStop.Exec.Command, " ") + require.Equal(preStopCommand, `/bin/sh -ec /consul/connect-inject/consul services deregister \ + -namespace="k8snamespace" \ + /consul/connect-inject/service.hcl`) +} + +func TestHandlerEnvoySidecar_NamespacesAndAuthMethod(t *testing.T) { + require := require.New(t) + h := Handler{ + EnableNamespaces: true, + ConsulDestinationNamespace: k8sNamespace, + AuthMethod: "test-auth-method", + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := h.envoySidecar(pod, k8sNamespace) + require.NoError(err) + + preStopCommand := strings.Join(container.Lifecycle.PreStop.Exec.Command, " ") + require.Equal(preStopCommand, `/bin/sh -ec /consul/connect-inject/consul services deregister \ + -token-file="/consul/connect-inject/acl-token" \ + -namespace="k8snamespace" \ + /consul/connect-inject/service.hcl +&& /consul/connect-inject/consul logout \ + -token-file="/consul/connect-inject/acl-token"`) +} diff --git a/connect-inject/handler.go b/connect-inject/handler.go index a116d6ee06..52245b9cdc 100644 --- a/connect-inject/handler.go +++ b/connect-inject/handler.go 
@@ -5,10 +5,11 @@ import ( "errors" "fmt" "io/ioutil" - "log" "net/http" "strconv" + "github.com/deckarep/golang-set" + "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" "github.com/mattbaird/jsonpatch" "k8s.io/api/admission/v1beta1" @@ -19,8 +20,8 @@ import ( ) const ( - DefaultConsulImage = "consul:1.5.0" - DefaultEnvoyImage = "envoyproxy/envoy-alpine:v1.9.1" + DefaultConsulImage = "consul:1.7.0" + DefaultEnvoyImage = "envoyproxy/envoy-alpine:v1.13.0" ) const ( @@ -81,16 +82,15 @@ var ( codecs = serializer.NewCodecFactory(runtime.NewScheme()) deserializer = codecs.UniversalDeserializer() - // kubeSystemNamespaces is a list of namespaces that are considered + // kubeSystemNamespaces is a set of namespaces that are considered // "system" level namespaces and are always skipped (never injected). - kubeSystemNamespaces = []string{ - metav1.NamespaceSystem, - metav1.NamespacePublic, - } + kubeSystemNamespaces = mapset.NewSetWith(metav1.NamespaceSystem, metav1.NamespacePublic) ) // Handler is the HTTP handler for admission webhooks. type Handler struct { + ConsulClient *api.Client + // ImageConsul is the container image for Consul to use. // ImageEnvoy is the container image for Envoy to use. // @@ -124,6 +124,45 @@ type Handler struct { // If not set, will use HTTP. ConsulCACert string + // EnableNamespaces indicates that a user is running Consul Enterprise + // with version 1.7+ which is namespace aware. It enables Consul namespaces, + // with injection into either a single Consul namespace or mirrored from + // k8s namespaces. + EnableNamespaces bool + + // AllowK8sNamespacesSet is a set of k8s namespaces to explicitly allow for + // injection. It supports the special character `*` which indicates that + // all k8s namespaces are eligible unless explicitly denied. This filter + // is applied before checking pod annotations. 
+ AllowK8sNamespacesSet mapset.Set + + // DenyK8sNamespacesSet is a set of k8s namespaces to explicitly deny + // injection and thus service registration with Consul. An empty set + // means that no namespaces are removed from consideration. This filter + // takes precedence over AllowK8sNamespacesSet. + DenyK8sNamespacesSet mapset.Set + + // ConsulDestinationNamespace is the name of the Consul namespace to register all + // injected services into if Consul namespaces are enabled and mirroring + // is disabled. This may be set, but will not be used if mirroring is enabled. + ConsulDestinationNamespace string + + // EnableK8SNSMirroring causes Consul namespaces to be created to match the + // k8s namespace of any service being registered into Consul. Services are + // registered into the Consul namespace that mirrors their k8s namespace. + EnableK8SNSMirroring bool + + // K8SNSMirroringPrefix is an optional prefix that can be added to the Consul + // namespaces created while mirroring. For example, if it is set to "k8s-", + // then the k8s `default` namespace will be mirrored in Consul's + // `k8s-default` namespace. + K8SNSMirroringPrefix string + + // CrossNamespaceACLPolicy is the name of the ACL policy to attach to + // any created Consul namespaces to allow cross namespace service discovery. + // Only necessary if ACLs are enabled. 
+ CrossNamespaceACLPolicy string + // Log Log hclog.Logger } @@ -137,7 +176,7 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) { if ct := r.Header.Get("Content-Type"); ct != "application/json" { msg := fmt.Sprintf("Invalid content-type: %q", ct) http.Error(w, msg, http.StatusBadRequest) - h.Log.Error("Error on request", "Error", msg, "Code", http.StatusBadRequest) + h.Log.Error("Error on request", "err", msg, "Code", http.StatusBadRequest) return } @@ -147,21 +186,21 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) { if body, err = ioutil.ReadAll(r.Body); err != nil { msg := fmt.Sprintf("Error reading request body: %s", err) http.Error(w, msg, http.StatusBadRequest) - h.Log.Error("Error on request", "Error", msg, "Code", http.StatusBadRequest) + h.Log.Error("Error on request", "err", msg, "Code", http.StatusBadRequest) return } } if len(body) == 0 { msg := "Empty request body" http.Error(w, msg, http.StatusBadRequest) - h.Log.Error("Error on request", "Error", msg, "Code", http.StatusBadRequest) + h.Log.Error("Error on request", "err", msg, "Code", http.StatusBadRequest) return } var admReq v1beta1.AdmissionReview var admResp v1beta1.AdmissionReview if _, _, err := deserializer.Decode(body, nil, &admReq); err != nil { - h.Log.Error("Could not decode admission request", "Error", err) + h.Log.Error("Could not decode admission request", "err", err) admResp.Response = admissionError(err) } else { admResp.Response = h.Mutate(admReq.Request) @@ -171,12 +210,12 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) { if err != nil { msg := fmt.Sprintf("Error marshalling admission response: %s", err) http.Error(w, msg, http.StatusInternalServerError) - h.Log.Error("Error on request", "Error", msg, "Code", http.StatusInternalServerError) + h.Log.Error("Error on request", "err", msg, "Code", http.StatusInternalServerError) return } if _, err := w.Write(resp); err != nil { - h.Log.Error("Error writing response", "Error", err) 
+ h.Log.Error("Error writing response", "err", err) } } @@ -186,10 +225,10 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon // Decode the pod from the request var pod corev1.Pod if err := json.Unmarshal(req.Object.Raw, &pod); err != nil { - log.Printf("Could not unmarshal request to pod: %s", err) + h.Log.Error("Could not unmarshal request to pod", "err", err) return &v1beta1.AdmissionResponse{ Result: &metav1.Status{ - Message: err.Error(), + Message: fmt.Sprintf("Could not unmarshal request to pod: %s", err), }, } } @@ -206,9 +245,10 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon // Setup the default annotation values that are used for the container. // This MUST be done before shouldInject is called since k. if err := h.defaultAnnotations(&pod, &patches); err != nil { + h.Log.Error("Error creating default annotations", "err", err, "Request Name", req.Name) return &v1beta1.AdmissionResponse{ Result: &metav1.Status{ - Message: err.Error(), + Message: fmt.Sprintf("Error creating default annotations: %s", err), }, } } @@ -216,6 +256,7 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon // Check if we should inject, for example we don't inject in the // system namespaces. if shouldInject, err := h.shouldInject(&pod, req.Namespace); err != nil { + h.Log.Error("Error checking if should inject", "err", err, "Request Name", req.Name) return &v1beta1.AdmissionResponse{ Result: &metav1.Status{ Message: fmt.Sprintf("Error checking if should inject: %s", err), @@ -249,8 +290,9 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon // Add the init container that registers the service and sets up // the Envoy configuration. 
- container, err := h.containerInit(&pod) + container, err := h.containerInit(&pod, req.Namespace) if err != nil { + h.Log.Error("Error configuring injection init container", "err", err, "Request Name", req.Name) return &v1beta1.AdmissionResponse{ Result: &metav1.Status{ Message: fmt.Sprintf("Error configuring injection init container: %s", err), @@ -263,8 +305,9 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon "/spec/initContainers")...) // Add the Envoy and lifecycle sidecars. - esContainer, err := h.envoySidecar(&pod) + esContainer, err := h.envoySidecar(&pod, req.Namespace) if err != nil { + h.Log.Error("Error configuring injection sidecar container", "err", err, "Request Name", req.Name) return &v1beta1.AdmissionResponse{ Result: &metav1.Status{ Message: fmt.Sprintf("Error configuring injection sidecar container: %s", err), @@ -288,10 +331,10 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon var err error patch, err = json.Marshal(patches) if err != nil { - log.Printf("Could not marshal patches: %s", err) + h.Log.Error("Could not marshal patches", "err", err, "Request Name", req.Name) return &v1beta1.AdmissionResponse{ Result: &metav1.Status{ - Message: err.Error(), + Message: fmt.Sprintf("Could not marshal patches: %s", err), }, } } @@ -301,14 +344,40 @@ func (h *Handler) Mutate(req *v1beta1.AdmissionRequest) *v1beta1.AdmissionRespon resp.PatchType = &patchType } + // Check and potentially create Consul resources. This is done after + // all patches are created to guarantee no errors were encountered in + // that process before modifying the Consul cluster. + if h.EnableNamespaces { + // Check if the namespace exists. If not, create it. 
+ if err := h.checkAndCreateNamespace(h.consulNamespace(req.Namespace)); err != nil { + h.Log.Error("Error checking or creating namespace", "err", err, + "Namespace", h.consulNamespace(req.Namespace), "Request Name", req.Name) + return &v1beta1.AdmissionResponse{ + Result: &metav1.Status{ + Message: fmt.Sprintf("Error checking or creating namespace: %s", err), + }, + } + } + } + return resp } func (h *Handler) shouldInject(pod *corev1.Pod, namespace string) (bool, error) { - // Don't inject in the Kubernetes system namespaces - for _, ns := range kubeSystemNamespaces { - if namespace == ns { + if kubeSystemNamespaces.Contains(namespace) { + return false, nil + } + + // Namespace logic + if h.EnableNamespaces { + // If in deny list, don't inject + if h.DenyK8sNamespacesSet.Contains(namespace) { + return false, nil + } + + // If not in allow list or allow list is not *, don't inject + if !h.AllowK8sNamespacesSet.Contains("*") && !h.AllowK8sNamespacesSet.Contains(namespace) { return false, nil } } @@ -398,6 +467,58 @@ func (h *Handler) defaultAnnotations(pod *corev1.Pod, patches *[]jsonpatch.JsonP return nil } +// consulNamespace returns the namespace that a service should be +// registered in based on the namespace options. It returns an +// empty string if namespaces aren't enabled. 
+func (h *Handler) consulNamespace(ns string) string { + if !h.EnableNamespaces { + return "" + } + + // Mirroring takes precedence + if h.EnableK8SNSMirroring { + return fmt.Sprintf("%s%s", h.K8SNSMirroringPrefix, ns) + } else { + return h.ConsulDestinationNamespace + } +} + +func (h *Handler) checkAndCreateNamespace(ns string) error { + // Check if the Consul namespace exists + namespaceInfo, _, err := h.ConsulClient.Namespaces().Read(ns, nil) + if err != nil { + return err + } + + // If not, create it + if namespaceInfo == nil { + var aclConfig api.NamespaceACLConfig + if h.CrossNamespaceACLPolicy != "" { + // Create the ACLs config for the cross-Consul-namespace + // default policy that needs to be attached + aclConfig = api.NamespaceACLConfig{ + PolicyDefaults: []api.ACLLink{ + {Name: h.CrossNamespaceACLPolicy}, + }, + } + } + + consulNamespace := api.Namespace{ + Name: ns, + Description: "Auto-generated by a Connect Injector", + ACLs: &aclConfig, + Meta: map[string]string{"external-source": "kubernetes"}, + } + + _, _, err = h.ConsulClient.Namespaces().Create(&consulNamespace, nil) + if err != nil { + return err + } + } + + return nil +} + func portValue(pod *corev1.Pod, value string) (int32, error) { // First search for the named port for _, c := range pod.Spec.Containers { diff --git a/connect-inject/handler_ent_test.go b/connect-inject/handler_ent_test.go new file mode 100644 index 0000000000..3ec89b1ee6 --- /dev/null +++ b/connect-inject/handler_ent_test.go @@ -0,0 +1,511 @@ +// +build enterprise + +package connectinject + +import ( + "testing" + "time" + + "github.com/deckarep/golang-set" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "k8s.io/api/admission/v1beta1" + corev1 "k8s.io/api/core/v1" +) + +// This tests the checkAndCreate namespace function that is called +// in handler.Mutate. 
Patch generation is tested in the non-enterprise +// tests. Other namespace-specific logic is tested directly in the +// specific methods (shouldInject, consulNamespace). +func TestHandler_MutateWithNamespaces(t *testing.T) { + t.Parallel() + + basicSpec := corev1.PodSpec{ + Containers: []corev1.Container{ + corev1.Container{ + Name: "web", + }, + }, + } + + cases := []struct { + Name string + Handler Handler + Req v1beta1.AdmissionRequest + ExpectedNamespaces []string + }{ + { + "single destination namespace 'default' from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default"}, + }, + + { + "single destination namespace 'default' from k8s 'non-default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "non-default", + }, + []string{"default"}, + }, + + { + "single destination namespace 'dest' from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "dest", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default", "dest"}, + }, + + { + "single destination namespace 'dest' from k8s 'non-default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + 
ConsulDestinationNamespace: "dest", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "non-default", + }, + []string{"default", "dest"}, + }, + + { + "mirroring from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default"}, + }, + + { + "mirroring from k8s 'dest'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "dest", + }, + []string{"default", "dest"}, + }, + + { + "mirroring with prefix from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + K8SNSMirroringPrefix: "k8s-", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default", "k8s-default"}, + }, + + { + "mirroring with prefix from k8s 'dest'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + K8SNSMirroringPrefix: "k8s-", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, 
&corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "dest", + }, + []string{"default", "k8s-dest"}, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + // Set up consul server + a, err := testutil.NewTestServerT(t) + require.NoError(err) + defer a.Stop() + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + // Add the client to the test's handler + tt.Handler.ConsulClient = client + + // Mutate! + resp := tt.Handler.Mutate(&tt.Req) + require.Equal(resp.Allowed, true) + + // Check all the namespace things + // Check that we have the right number of namespaces + namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) + require.NoError(err) + require.Len(namespaces, len(tt.ExpectedNamespaces)) + + // Check the namespace details + for _, ns := range tt.ExpectedNamespaces { + actNamespace, _, err := client.Namespaces().Read(ns, &api.QueryOptions{}) + require.NoErrorf(err, "error getting namespace %s", ns) + require.NotNilf(actNamespace, "namespace %s was nil", ns) + require.Equalf(ns, actNamespace.Name, "namespace %s was improperly named", ns) + + // Check created namespace properties + if ns != "default" { + require.Equalf("Auto-generated by a Connect Injector", actNamespace.Description, + "wrong namespace description for namespace %s", ns) + require.Containsf(actNamespace.Meta, "external-source", + "namespace %s does not contain external-source metadata key", ns) + require.Equalf("kubernetes", actNamespace.Meta["external-source"], + "namespace %s has wrong value for external-source metadata key", ns) + } + + } + }) + } +} + +// Tests that the correct cross-namespace policy is +// added to created namespaces. 
+func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { + basicSpec := corev1.PodSpec{ + Containers: []corev1.Container{ + corev1.Container{ + Name: "web", + }, + }, + } + + cases := []struct { + Name string + Handler Handler + Req v1beta1.AdmissionRequest + ExpectedNamespaces []string + }{ + { + "acls + single destination namespace 'default' from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default"}, + }, + + { + "acls + single destination namespace 'default' from k8s 'non-default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "non-default", + }, + []string{"default"}, + }, + + { + "acls + single destination namespace 'dest' from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "dest", + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default", "dest"}, + }, + + { + "acls + single destination namespace 'dest' from k8s 'non-default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, 
+ ConsulDestinationNamespace: "dest", + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "non-default", + }, + []string{"default", "dest"}, + }, + + { + "acls + mirroring from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default"}, + }, + + { + "acls + mirroring from k8s 'dest'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "dest", + }, + []string{"default", "dest"}, + }, + + { + "acls + mirroring with prefix from k8s 'default'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + K8SNSMirroringPrefix: "k8s-", + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "default", + }, + []string{"default", "k8s-default"}, + }, + + { + "acls + mirroring with prefix from k8s 'dest'", + Handler{ + Log: hclog.Default().Named("handler"), + AllowK8sNamespacesSet: mapset.NewSet("*"), + 
DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: "default", // will be overridden + EnableK8SNSMirroring: true, + K8SNSMirroringPrefix: "k8s-", + CrossNamespaceACLPolicy: "cross-namespace-policy", + }, + v1beta1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + Namespace: "dest", + }, + []string{"default", "k8s-dest"}, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + // Set up consul server + a, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + }) + require.NoError(t, err) + defer a.Stop() + + // Set up a client for bootstrapping + bootClient, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // Bootstrap the server and get the bootstrap token + var bootstrapResp *api.ACLToken + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + bootstrapResp, _, err = bootClient.ACL().Bootstrap() + require.NoError(r, err) + }) + bootstrapToken := bootstrapResp.SecretID + require.NotEmpty(t, bootstrapToken) + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + Token: bootstrapToken, + }) + require.NoError(t, err) + + // Add the client to the test's handler + tt.Handler.ConsulClient = client + + // Create cross namespace policy + // This would have been created by the acl bootstrapper in the + // default namespace to be attached to all created namespaces. 
+ crossNamespaceRules := `namespace_prefix "" { + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } +} ` + + policyTmpl := api.ACLPolicy{ + Name: "cross-namespace-policy", + Description: "Policy to allow permissions to cross Consul namespaces for k8s services", + Rules: crossNamespaceRules, + } + + _, _, err = client.ACL().PolicyCreate(&policyTmpl, &api.WriteOptions{}) + require.NoError(t, err) + + // Mutate! + resp := tt.Handler.Mutate(&tt.Req) + require.Equal(t, resp.Allowed, true) + + // Check all the namespace things + // Check that we have the right number of namespaces + namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) + require.NoError(t, err) + require.Len(t, namespaces, len(tt.ExpectedNamespaces)) + + // Check the namespace details + for _, ns := range tt.ExpectedNamespaces { + actNamespace, _, err := client.Namespaces().Read(ns, &api.QueryOptions{}) + require.NoErrorf(t, err, "error getting namespace %s", ns) + require.NotNilf(t, actNamespace, "namespace %s was nil", ns) + require.Equalf(t, ns, actNamespace.Name, "namespace %s was improperly named", ns) + + // Check created namespace properties + if ns != "default" { + require.Equalf(t, "Auto-generated by a Connect Injector", actNamespace.Description, + "wrong namespace description for namespace %s", ns) + require.Containsf(t, actNamespace.Meta, "external-source", + "namespace %s does not contain external-source metadata key", ns) + require.Equalf(t, "kubernetes", actNamespace.Meta["external-source"], + "namespace %s has wrong value for external-source metadata key", ns) + + // Check for ACL policy things + // The acl bootstrapper will update the `default` namespace, so that + // can't be tested here. 
+ require.NotNilf(t, actNamespace.ACLs, "ACLs was nil for namespace %s", ns) + require.Lenf(t, actNamespace.ACLs.PolicyDefaults, 1, "wrong length for PolicyDefaults in namespace %s", ns) + require.Equalf(t, "cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name, + "wrong policy name for namespace %s", ns) + } + + } + }) + } +} diff --git a/connect-inject/handler_test.go b/connect-inject/handler_test.go index bc0a8906ee..5e1b8e5f0d 100644 --- a/connect-inject/handler_test.go +++ b/connect-inject/handler_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "testing" + "github.com/deckarep/golang-set" "github.com/hashicorp/go-hclog" "github.com/mattbaird/jsonpatch" "github.com/stretchr/testify/require" @@ -553,7 +554,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { if len(actual) == 0 { actual = nil } - require.Equal(actual, tt.Expected) + require.Equal(tt.Expected, actual) }) } } @@ -640,7 +641,305 @@ func TestHandlerPortValue(t *testing.T) { return } - require.Equal(port, tt.Expected) + require.Equal(tt.Expected, port) + }) + } +} + +// Test consulNamespace function +func TestConsulNamespace(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + ConsulDestinationNamespace string + EnableK8SNSMirroring bool + K8SNSMirroringPrefix string + K8sNamespace string + Expected string + }{ + { + "namespaces disabled", + false, + "default", + false, + "", + "namespace", + "", + }, + + { + "namespaces disabled, mirroring enabled", + false, + "default", + true, + "", + "namespace", + "", + }, + + { + "namespaces disabled, mirroring enabled, prefix defined", + false, + "default", + true, + "test-", + "namespace", + "", + }, + + { + "namespaces enabled, mirroring disabled", + true, + "default", + false, + "", + "namespace", + "default", + }, + + { + "namespaces enabled, mirroring disabled, prefix defined", + true, + "default", + false, + "test-", + "namespace", + "default", + }, + + { + "namespaces enabled, mirroring enabled", + true, + 
"default", + true, + "", + "namespace", + "namespace", + }, + + { + "namespaces enabled, mirroring enabled, prefix defined", + true, + "default", + true, + "test-", + "namespace", + "test-namespace", + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + h := Handler{ + EnableNamespaces: tt.EnableNamespaces, + ConsulDestinationNamespace: tt.ConsulDestinationNamespace, + EnableK8SNSMirroring: tt.EnableK8SNSMirroring, + K8SNSMirroringPrefix: tt.K8SNSMirroringPrefix, + } + + ns := h.consulNamespace(tt.K8sNamespace) + + require.Equal(tt.Expected, ns) + }) + } +} + +// Test shouldInject function +func TestShouldInject(t *testing.T) { + cases := []struct { + Name string + Pod *corev1.Pod + K8sNamespace string + EnableNamespaces bool + AllowK8sNamespacesSet mapset.Set + DenyK8sNamespacesSet mapset.Set + Expected bool + }{ + { + "kube-system not injected", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + // Service annotation is required for injection + annotationService: "testing", + }, + }, + }, + "kube-system", + false, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "kube-public not injected", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "kube-public", + false, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "namespaces disabled", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSet(), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, empty allow/deny lists", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "namespaces enabled, allow *", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("*"), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, allow default", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("default"), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, allow * and default", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("*", "default"), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, allow only ns1 and ns2", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("ns1", "ns2"), + mapset.NewSet(), + false, + }, + { + "namespaces enabled, deny default ns", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSet(), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces enabled, allow *, deny default ns", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("*"), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces enabled, default ns in both allow and deny lists", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("default"), + mapset.NewSetWith("default"), + false, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + h := Handler{ + RequireAnnotation: false, + EnableNamespaces: tt.EnableNamespaces, + AllowK8sNamespacesSet: 
tt.AllowK8sNamespacesSet, + DenyK8sNamespacesSet: tt.DenyK8sNamespacesSet, + } + + injected, err := h.shouldInject(tt.Pod, tt.K8sNamespace) + + require.Equal(nil, err) + require.Equal(tt.Expected, injected) }) } } diff --git a/go.mod b/go.mod index dc2452b196..c6a3d7b81d 100644 --- a/go.mod +++ b/go.mod @@ -7,16 +7,17 @@ require ( github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect github.com/cenkalti/backoff v2.1.1+incompatible github.com/coredns/coredns v1.2.2 // indirect + github.com/deckarep/golang-set v1.7.1 github.com/docker/go-connections v0.4.0 // indirect github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect github.com/gogo/protobuf v1.3.1 // indirect github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect github.com/googleapis/gnostic v0.3.1 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/hashicorp/consul v1.6.2 - github.com/hashicorp/consul/api v1.3.0 - github.com/hashicorp/consul/sdk v0.3.0 - github.com/hashicorp/go-hclog v0.9.2 + github.com/hashicorp/consul v1.7.1 + github.com/hashicorp/consul/api v1.4.0 + github.com/hashicorp/consul/sdk v0.4.0 + github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250 // indirect @@ -36,7 +37,6 @@ require ( golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect - gopkg.in/yaml.v2 v2.2.7 // indirect k8s.io/api v0.0.0-20190325185214-7544f9db76f6 k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841 k8s.io/client-go v8.0.0+incompatible diff --git a/go.sum b/go.sum index a4f9cdb8a2..b25980a1eb 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,6 @@ github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQh github.com/armon/go-radix 
v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.15.24 h1:xLAdTA/ore6xdPAljzZRed7IGqQgC+nY+ERS5vaj4Ro= -github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= @@ -55,6 +53,8 @@ github.com/coredns/coredns v1.2.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= +github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= @@ -79,13 +79,13 @@ github.com/envoyproxy/protoc-gen-validate v0.0.14 h1:YBW6/cKy9prEGRYLnaGa4IDhzxZ github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 
h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= @@ -126,20 +126,12 @@ github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORR github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/consul v1.6.2 h1:OjnFfc1vHPLtwrYAMC9HlJ4WTSmgBTq2erWuBERm3hY= -github.com/hashicorp/consul v1.6.2/go.mod h1:kZmEKWDGa47nEdLEbvJyh14uTBpG37Wo6N39Vfpo7uE= -github.com/hashicorp/consul v1.7.0-beta2 h1:JI52G27QwlrqRwGZVO90BUeotfQ/r16/r+GwnaArMFA= -github.com/hashicorp/consul v1.7.0-beta2/go.mod h1:YLOm5bVFXiBhcZpiFHvdyle8ZgL3EtvD45FSr7WzQFo= -github.com/hashicorp/consul/api v1.2.0/go.mod h1:1SIkFYi2ZTXUE5Kgt179+4hH33djo11+0Eo2XgTAtkw= -github.com/hashicorp/consul/api v1.2.1-0.20191220171653-e01f6913516b 
h1:zCqF4dcqgKvTffiUpOmOSfaWYjMK0jmsgYIIPA1mV18= -github.com/hashicorp/consul/api v1.2.1-0.20191220171653-e01f6913516b/go.mod h1:1SIkFYi2ZTXUE5Kgt179+4hH33djo11+0Eo2XgTAtkw= -github.com/hashicorp/consul/api v1.3.0 h1:HXNYlRkkM/t+Y/Yhxtwcy02dlYwIaoxzvxPnS+cqy78= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.2.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.2.1-0.20191220171653-e01f6913516b h1:MPk1R7pBr8uI1wUeDLvo2UYNwUeY00lixjfYvrzDJ20= -github.com/hashicorp/consul/sdk v0.2.1-0.20191220171653-e01f6913516b/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0 h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul v1.7.1 h1:HgnJOJWGc8PIqRYa5VKT3KXB5fqYqloX/u5Bk1bY3/8= +github.com/hashicorp/consul v1.7.1/go.mod h1:vKfXmSQNl6HwO/JqQ2DDLzisBDV49y+JVTkrdW1cnSU= +github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3Kp9EY= +github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= +github.com/hashicorp/consul/sdk v0.4.0 h1:zBtCfKJZcJDBvSCkQJch4ulp59m1rATFLKwNo/LYY30= +github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs= @@ -149,21 +141,19 @@ github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1: github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp 
v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd h1:SynRxs8h2h7lLSA5py5a3WWkYpImhREtju0CuRd97wc= -github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd/go.mod h1:ueUgD9BeIocT7QNuvxSyJyPAM9dfifBcaWmeybb67OY= +github.com/hashicorp/go-connlimit v0.2.0 h1:OZjcfNxH/hPh/bT2Iw5yOJcLzz+zuIWpsp3I1S4Pjw4= +github.com/hashicorp/go-connlimit v0.2.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= github.com/hashicorp/go-discover v0.0.0-20191202160150-7ec2cfbda7a2 h1:r7GtRT+VXoM5WqHMxSVDIKgVCfK9T8CoS51RDKeOjBM= github.com/hashicorp/go-discover v0.0.0-20191202160150-7ec2cfbda7a2/go.mod h1:NnH5X4UCBEBdTuK2L8s4e4ilJm3UmGX0bANHCz0HSs0= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 h1:yxxFgVz31vFoKKTtRUNbXLNe4GFnbLKqg+0N7yG42L8= -github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod 
h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE= github.com/hashicorp/go-memdb v1.0.3 h1:iiqzNk8jKB6/sLRj623Ui/Vi1zf21LOUpgzGjTge6a8= github.com/hashicorp/go-memdb v1.0.3/go.mod h1:LWQ8R70vPrS4OEY9k28D2z8/Zzyu34NVzeRibGAzHO0= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -177,9 +167,10 @@ github.com/hashicorp/go-raftchunking v0.6.1/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1J github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -206,12 +197,14 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= -github.com/hashicorp/memberlist v0.1.5/go.mod 
h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.6 h1:ouPxvwKYaNZe+eTcHxYP0EblPduVLvIPycul+vv8his= +github.com/hashicorp/memberlist v0.1.6/go.mod h1:5VDNHjqFMgEcclnwmkCnC99IPwxBmIsxwY8qn+Nl0H4= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c= +github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= @@ -233,8 +226,6 @@ github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da h1:FjHUJJ7oBW4G/9j1KzlHaXL09LyMVM9rupS39lncbXk= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62 h1:JHCT6xuyPUrbbgAPE/3dqlvUKzRHMNuTBKKUb6OeR/k= @@ -247,6 +238,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -263,14 +256,21 @@ github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a h1:+J2gw7Bw77w github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 
h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= -github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= @@ -375,8 +375,6 @@ github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4 h1:PDpCLFAH/YIX0QpHPf2eO7L4rC2OOirBrKtXTLLiNTY= golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -415,16 +413,18 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd h1:3x5uuvBgE6oaXJjCOvpCC1IpgJogqQ+PqGGU3ZxAgII= -golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -464,6 +464,8 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzyc gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= @@ -479,8 +481,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI= diff --git a/subcommand/inject-connect/command.go b/subcommand/inject-connect/command.go index 80821cb9af..8123d87d62 100644 --- a/subcommand/inject-connect/command.go +++ b/subcommand/inject-connect/command.go @@ -1,4 +1,4 @@ -package subcommand +package connectinject import ( "context" @@ -13,8 +13,10 @@ import ( "sync/atomic" "time" + "github.com/deckarep/golang-set" "github.com/hashicorp/consul-k8s/connect-inject" "github.com/hashicorp/consul-k8s/helper/cert" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" @@ -23,23 +25,44 @@ import ( "k8s.io/client-go/rest" ) +type arrayFlags []string + +func (i *arrayFlags) Set(value string) error { + *i = append(*i, value) + return nil +} + type Command struct { UI cli.Ui - flagListen string - flagAutoName string // MutatingWebhookConfiguration for updating - flagAutoHosts string // SANs for the auto-generated TLS cert. 
- flagCertFile string // TLS cert for listening (PEM) - flagKeyFile string // TLS cert private key (PEM) - flagDefaultInject bool // True to inject by default - flagConsulImage string // Docker image for Consul - flagEnvoyImage string // Docker image for Envoy - flagConsulK8sImage string // Docker image for consul-k8s - flagACLAuthMethod string // Auth Method to use for ACLs, if enabled - flagCentralConfig bool // True to enable central config injection - flagDefaultProtocol string // Default protocol for use with central config - flagConsulCACert string // Path to CA Certificate to use when communicating with Consul clients - flagSet *flag.FlagSet + flagListen string + flagAutoName string // MutatingWebhookConfiguration for updating + flagAutoHosts string // SANs for the auto-generated TLS cert. + flagCertFile string // TLS cert for listening (PEM) + flagKeyFile string // TLS cert private key (PEM) + flagDefaultInject bool // True to inject by default + flagConsulImage string // Docker image for Consul + flagEnvoyImage string // Docker image for Envoy + flagConsulK8sImage string // Docker image for consul-k8s + flagACLAuthMethod string // Auth Method to use for ACLs, if enabled + flagWriteServiceDefaults bool // True to enable central config injection + flagDefaultProtocol string // Default protocol for use with central config + flagConsulCACert string // Path to CA Certificate to use when communicating with Consul clients + + // Flags to support namespaces + flagEnableNamespaces bool // Use namespacing on all components + flagConsulDestinationNamespace string // Consul namespace to register everything if not mirroring + flagAllowK8sNamespacesList []string // K8s namespaces to explicitly inject + flagDenyK8sNamespacesList []string // K8s namespaces to deny injection (has precedence) + flagEnableK8SNSMirroring bool // Enables mirroring of k8s namespaces into Consul + flagK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring + 
flagCrossNamespaceACLPolicy string // The name of the ACL policy to add to every created namespace if ACLs are enabled + + flagSet *flag.FlagSet + http *flags.HTTPFlags + + consulClient *api.Client + clientset *kubernetes.Clientset once sync.Once help string @@ -48,8 +71,8 @@ type Command struct { func (c *Command) init() { c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) - c.flagSet.BoolVar(&c.flagDefaultInject, "default-inject", true, "Inject by default.") c.flagSet.StringVar(&c.flagListen, "listen", ":8080", "Address to bind listener to.") + c.flagSet.BoolVar(&c.flagDefaultInject, "default-inject", true, "Inject by default.") c.flagSet.StringVar(&c.flagAutoName, "tls-auto", "", "MutatingWebhookConfiguration name. If specified, will auto generate cert bundle.") c.flagSet.StringVar(&c.flagAutoHosts, "tls-auto-hosts", "", @@ -59,19 +82,40 @@ func (c *Command) init() { c.flagSet.StringVar(&c.flagKeyFile, "tls-key-file", "", "PEM-encoded TLS private key to serve. If blank, will generate random cert.") c.flagSet.StringVar(&c.flagConsulImage, "consul-image", connectinject.DefaultConsulImage, - "Docker image for Consul. Defaults to an Consul 1.3.0.") + "Docker image for Consul. Defaults to Consul 1.7.0.") c.flagSet.StringVar(&c.flagEnvoyImage, "envoy-image", connectinject.DefaultEnvoyImage, - "Docker image for Envoy. Defaults to Envoy 1.8.0.") + "Docker image for Envoy. Defaults to Envoy 1.9.1.") c.flagSet.StringVar(&c.flagConsulK8sImage, "consul-k8s-image", "", "Docker image for consul-k8s. 
Used for the connect sidecar.") c.flagSet.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "The name of the Kubernetes Auth Method to use for connectInjection if ACLs are enabled.") - c.flagSet.BoolVar(&c.flagCentralConfig, "enable-central-config", false, + c.flagSet.BoolVar(&c.flagWriteServiceDefaults, "enable-central-config", false, "Write a service-defaults config for every Connect service using protocol from -default-protocol or Pod annotation.") c.flagSet.StringVar(&c.flagDefaultProtocol, "default-protocol", "", "The default protocol to use in central config registrations.") c.flagSet.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", "Path to CA certificate to use if communicating with Consul clients over HTTPS.") + c.flagSet.Var((*flags.AppendSliceValue)(&c.flagAllowK8sNamespacesList), "allow-k8s-namespace", + "K8s namespaces to explicitly allow. May be specified multiple times.") + c.flagSet.Var((*flags.AppendSliceValue)(&c.flagDenyK8sNamespacesList), "deny-k8s-namespace", + "K8s namespaces to explicitly deny. Takes precedence over allow. May be specified multiple times.") + c.flagSet.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, + "[Enterprise Only] Enables namespaces, in either a single Consul namespace or mirrored") + c.flagSet.StringVar(&c.flagConsulDestinationNamespace, "consul-destination-namespace", "default", + "[Enterprise Only] Defines which Consul namespace to register all injected services into. 
If '-enable-namespace-mirroring' "+ + "is true, this is not used.") + c.flagSet.BoolVar(&c.flagEnableK8SNSMirroring, "enable-k8s-namespace-mirroring", false, "[Enterprise Only] Enables "+ + "k8s namespace mirroring") + c.flagSet.StringVar(&c.flagK8SNSMirroringPrefix, "k8s-namespace-mirroring-prefix", "", + "[Enterprise Only] Prefix that will be added to all k8s namespaces mirrored into Consul if mirroring is enabled.") + c.flagSet.StringVar(&c.flagCrossNamespaceACLPolicy, "consul-cross-namespace-acl-policy", "", + "[Enterprise Only] Name of the ACL policy to attach to all created Consul namespaces to allow service "+ + "discovery across Consul namespaces. Only necessary if ACLs are enabled.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.http.ClientFlags()) + flags.Merge(c.flagSet, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flagSet) } @@ -88,15 +132,27 @@ func (c *Command) Run(args []string) int { } // We must have an in-cluster K8S client - config, err := rest.InClusterConfig() - if err != nil { - c.UI.Error(fmt.Sprintf("Error loading in-cluster K8S config: %s", err)) - return 1 + if c.clientset == nil { + config, err := rest.InClusterConfig() + if err != nil { + c.UI.Error(fmt.Sprintf("Error loading in-cluster K8S config: %s", err)) + return 1 + } + c.clientset, err = kubernetes.NewForConfig(config) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating K8S client: %s", err)) + return 1 + } } - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating K8S client: %s", err)) - return 1 + + // Set up Consul client + if c.consulClient == nil { + var err error + c.consulClient, err = c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } } // Determine where to source the certificates from @@ -119,7 +175,17 @@ func (c *Command) Run(args []string) int { go certNotify.Start(context.Background()) ctx, cancelFunc := 
context.WithCancel(context.Background()) defer cancelFunc() - go c.certWatcher(ctx, certCh, clientset) + go c.certWatcher(ctx, certCh, c.clientset) + + // Convert allow/deny lists to sets + allowSet := mapset.NewSet() + denySet := mapset.NewSet() + for _, allow := range c.flagAllowK8sNamespacesList { + allowSet.Add(allow) + } + for _, deny := range c.flagDenyK8sNamespacesList { + denySet.Add(deny) + } var consulCACert []byte if c.flagConsulCACert != "" { @@ -133,15 +199,23 @@ func (c *Command) Run(args []string) int { // Build the HTTP handler and server injector := connectinject.Handler{ - ImageConsul: c.flagConsulImage, - ImageEnvoy: c.flagEnvoyImage, - ImageConsulK8S: c.flagConsulK8sImage, - RequireAnnotation: !c.flagDefaultInject, - AuthMethod: c.flagACLAuthMethod, - WriteServiceDefaults: c.flagCentralConfig, - DefaultProtocol: c.flagDefaultProtocol, - ConsulCACert: string(consulCACert), - Log: hclog.Default().Named("handler"), + ConsulClient: c.consulClient, + ImageConsul: c.flagConsulImage, + ImageEnvoy: c.flagEnvoyImage, + ImageConsulK8S: c.flagConsulK8sImage, + RequireAnnotation: !c.flagDefaultInject, + AuthMethod: c.flagACLAuthMethod, + WriteServiceDefaults: c.flagWriteServiceDefaults, + DefaultProtocol: c.flagDefaultProtocol, + ConsulCACert: string(consulCACert), + EnableNamespaces: c.flagEnableNamespaces, + AllowK8sNamespacesSet: allowSet, + DenyK8sNamespacesSet: denySet, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, + K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + Log: hclog.Default().Named("handler"), } mux := http.NewServeMux() mux.HandleFunc("/mutate", injector.Handle) diff --git a/subcommand/inject-connect/command_test.go b/subcommand/inject-connect/command_test.go index f6293d3d5f..b40bfd2dba 100644 --- a/subcommand/inject-connect/command_test.go +++ b/subcommand/inject-connect/command_test.go @@ -1,4 +1,4 @@ -package 
subcommand +package connectinject import ( "github.com/mitchellh/cli" diff --git a/subcommand/lifecycle-sidecar/command.go b/subcommand/lifecycle-sidecar/command.go index 22bcb12469..b8789eebb4 100644 --- a/subcommand/lifecycle-sidecar/command.go +++ b/subcommand/lifecycle-sidecar/command.go @@ -26,6 +26,8 @@ type Command struct { flagSet *flag.FlagSet flagLogLevel string + consulCommand []string + once sync.Once help string sigCh chan os.Signal @@ -58,29 +60,29 @@ func (c *Command) Run(args []string) int { return 1 } - syncPeriod, logLevel, err := c.validateFlags() + err := c.validateFlags() if err != nil { c.UI.Error("Error: " + err.Error()) return 1 } logger := hclog.New(&hclog.LoggerOptions{ - Level: logLevel, + Level: hclog.LevelFromString(c.flagLogLevel), Output: os.Stderr, }) // Log initial configuration logger.Info("Command configuration", "service-config", c.flagServiceConfig, "consul-binary", c.flagConsulBinary, - "sync-period", syncPeriod, - "log-level", logLevel) + "sync-period", c.flagSyncPeriod, + "log-level", c.flagLogLevel) // Set up channel for graceful SIGINT shutdown. signal.Notify(c.sigCh, os.Interrupt) - consulCommand := []string{"services", "register"} - consulCommand = append(consulCommand, c.parseConsulFlags()...) - consulCommand = append(consulCommand, c.flagServiceConfig) + c.consulCommand = []string{"services", "register"} + c.consulCommand = append(c.consulCommand, c.parseConsulFlags()...) + c.consulCommand = append(c.consulCommand, c.flagServiceConfig) // The main work loop. We continually re-register our service every // syncPeriod. Consul is smart enough to know when the service hasn't changed @@ -90,7 +92,7 @@ func (c *Command) Run(args []string) int { // // The loop will only exit when the Pod is shut down and we receive a SIGINT. for { - cmd := exec.Command(c.flagConsulBinary, consulCommand...) + cmd := exec.Command(c.flagConsulBinary, c.consulCommand...) 
// Run the command and record the stdout and stderr output output, err := cmd.CombinedOutput() @@ -102,7 +104,7 @@ // Re-loop after syncPeriod or exit if we receive an interrupt. select { - case <-time.After(syncPeriod): + case <-time.After(c.flagSyncPeriod): continue case <-c.sigCh: log.Info("SIGINT received, shutting down") @@ -111,34 +113,36 @@ } } -// validateFlags validates the flags and returns the parsed syncPeriod and -// logLevel. -func (c *Command) validateFlags() (syncPeriod time.Duration, logLevel hclog.Level, err error) { +// validateFlags validates the flags, returning an error if any are invalid. +func (c *Command) validateFlags() error { if c.flagServiceConfig == "" { - err = errors.New("-service-config must be set") - return + return errors.New("-service-config must be set") } if c.flagConsulBinary == "" { - err = errors.New("-consul-binary must be set") - return + return errors.New("-consul-binary must be set") + } + if c.flagSyncPeriod == 0 { + // if sync period is 0, then the select loop will + // always pick the first case, and it'll be impossible + // to terminate the command gracefully with SIGINT.
+ return errors.New("-sync-period must be greater than 0") } - _, err = os.Stat(c.flagServiceConfig) + _, err := os.Stat(c.flagServiceConfig) if os.IsNotExist(err) { err = fmt.Errorf("-service-config file %q not found", c.flagServiceConfig) - return + return fmt.Errorf("-service-config file %q not found", c.flagServiceConfig) } _, err = exec.LookPath(c.flagConsulBinary) if err != nil { - err = fmt.Errorf("-consul-binary %q not found: %s", c.flagConsulBinary, err) - return + return fmt.Errorf("-consul-binary %q not found: %s", c.flagConsulBinary, err) } - logLevel = hclog.LevelFromString(c.flagLogLevel) + logLevel := hclog.LevelFromString(c.flagLogLevel) if logLevel == hclog.NoLevel { - err = fmt.Errorf("unknown log level: %s", c.flagLogLevel) - return + return fmt.Errorf("unknown log level: %s", c.flagLogLevel) } - return + + return nil } // parseConsulFlags creates Consul client command flags diff --git a/subcommand/lifecycle-sidecar/command_ent_test.go b/subcommand/lifecycle-sidecar/command_ent_test.go new file mode 100644 index 0000000000..fc3cf52995 --- /dev/null +++ b/subcommand/lifecycle-sidecar/command_ent_test.go @@ -0,0 +1,89 @@ +// +build enterprise + +package subcommand + +import ( + "os" + "testing" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +// Test that we register the services with namespaces. +func TestRun_ServicesRegistration_Namespaces(t *testing.T) { + t.Parallel() + tmpDir, configFile := createServicesTmpFile(t, servicesRegistrationWithNamespaces) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerT(t) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "100ms", + }) + defer stopCommand(t, &cmd, exitChan) + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // create necessary namespaces first + _, _, err = client.Namespaces().Create(&api.Namespace{Name: "namespace"}, nil) + require.NoError(t, err) + + timer := &retry.Timer{Timeout: 1 * time.Second, Wait: 100 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", &api.QueryOptions{Namespace: "namespace"}) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + require.Equal(r, "namespace", svc.Namespace) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", &api.QueryOptions{Namespace: "namespace"}) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + require.Equal(r, svcProxy.Namespace, "namespace") + require.Len(r, svcProxy.Proxy.Upstreams, 1) + require.Equal(r, svcProxy.Proxy.Upstreams[0].DestinationNamespace, "dest-namespace") + }) +} + +const servicesRegistrationWithNamespaces = ` +services { + id = "service-id" + name = "service" + port = 80 + namespace = "namespace" +} +services { + id = "service-id-sidecar-proxy" + name = "service-sidecar-proxy" + namespace = "namespace" + port = 2000 + kind = "connect-proxy" + proxy { + destination_service_name = "service" + destination_service_id = "service-id" + local_service_port = 80 + upstreams { + destination_type = "service" + destination_name = "dest-name" + destination_namespace = "dest-namespace" + local_bind_port = 1234 + } + } +}` diff --git a/subcommand/lifecycle-sidecar/command_test.go b/subcommand/lifecycle-sidecar/command_test.go index a448dc87e1..102ca5b9f6 100644 --- a/subcommand/lifecycle-sidecar/command_test.go +++ b/subcommand/lifecycle-sidecar/command_test.go @@ -42,6 +42,14 @@ func TestRun_FlagValidation(t 
*testing.T) { }, ExpErr: "-consul-binary must be set", }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=consul", + "-sync-period=0s", + }, + ExpErr: "-sync-period must be greater than 0", + }, } for _, c := range cases { @@ -70,13 +78,13 @@ func TestRun_FlagValidation_ServiceConfigFileMissing(t *testing.T) { func TestRun_FlagValidation_ConsulBinaryMissing(t *testing.T) { t.Parallel() + ui := cli.NewMockUi() cmd := Command{ UI: ui, } - // Create a temporary service registration file - tmpDir, configFile := createServicesTmpFile(t) + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) defer os.RemoveAll(tmpDir) configFlag := "-service-config=" + configFile @@ -88,7 +96,8 @@ func TestRun_FlagValidation_ConsulBinaryMissing(t *testing.T) { func TestRun_FlagValidation_InvalidLogLevel(t *testing.T) { t.Parallel() - tmpDir, configFile := createServicesTmpFile(t) + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) defer os.RemoveAll(tmpDir) ui := cli.NewMockUi() @@ -103,7 +112,8 @@ func TestRun_FlagValidation_InvalidLogLevel(t *testing.T) { // Test that we register the services. func TestRun_ServicesRegistration(t *testing.T) { t.Parallel() - tmpDir, configFile := createServicesTmpFile(t) + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) defer os.RemoveAll(tmpDir) a, err := testutil.NewTestServerT(t) @@ -120,7 +130,6 @@ func TestRun_ServicesRegistration(t *testing.T) { "-http-addr", a.HTTPAddr, "-service-config", configFile, "-sync-period", "100ms", - "-consul-binary", "consul", }) defer stopCommand(t, &cmd, exitChan) @@ -144,7 +153,8 @@ func TestRun_ServicesRegistration(t *testing.T) { // Test that we register services when the Consul agent is down at first. 
func TestRun_ServicesRegistration_ConsulDown(t *testing.T) { t.Parallel() - tmpDir, configFile := createServicesTmpFile(t) + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) defer os.RemoveAll(tmpDir) ui := cli.NewMockUi() @@ -157,7 +167,6 @@ func TestRun_ServicesRegistration_ConsulDown(t *testing.T) { "-http-addr", fmt.Sprintf("127.0.0.1:%d", randomPort), "-service-config", configFile, "-sync-period", "100ms", - "-consul-binary", "consul", }) defer stopCommand(t, &cmd, exitChan) @@ -190,6 +199,56 @@ func TestRun_ServicesRegistration_ConsulDown(t *testing.T) { }) } +// Test that we parse all flags and pass them down to the underlying Consul command. +func TestRun_ConsulCommandFlags(t *testing.T) { + t.Parallel() + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerT(t) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "1s", + "-consul-binary", "consul", + "-token=abc", + "-token-file=/token/file", + "-ca-file=/ca/file", + "-ca-path=/ca/path", + "-client-cert=/client/cert", + "-client-key=/client/key", + "-tls-server-name=consul.foo.com", + }) + defer stopCommand(t, &cmd, exitChan) + + expectedCommand := []string{ + "services", + "register", + "-http-addr=" + a.HTTPAddr, + "-token=abc", + "-token-file=/token/file", + "-ca-file=/ca/file", + "-ca-path=/ca/path", + "-client-cert=/client/cert", + "-client-key=/client/key", + "-tls-server-name=consul.foo.com", + configFile, + } + timer := &retry.Timer{Timeout: 1000 * time.Millisecond, Wait: 100 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + require.ElementsMatch(r, expectedCommand, cmd.consulCommand) + }) +} + // This function starts the command asynchronously and returns a non-blocking chan. // When finished, the command will send its exit code to the channel. // Note that it's the responsibility of the caller to terminate the command by calling stopCommand, @@ -214,12 +273,12 @@ func stopCommand(t *testing.T, cmd *Command, exitChan chan int) { // createServicesTmpFile creates a temp directory // and writes servicesRegistration as an HCL file there. 
-func createServicesTmpFile(t *testing.T) (string, string) { +func createServicesTmpFile(t *testing.T, serviceHCL string) (string, string) { tmpDir, err := ioutil.TempDir("", "") require.NoError(t, err) configFile := filepath.Join(tmpDir, "svc.hcl") - err = ioutil.WriteFile(configFile, []byte(servicesRegistration), 0600) + err = ioutil.WriteFile(configFile, []byte(serviceHCL), 0600) require.NoError(t, err) return tmpDir, configFile diff --git a/subcommand/server-acl-init/command.go b/subcommand/server-acl-init/command.go index 0a29ab2046..d7f669b236 100644 --- a/subcommand/server-acl-init/command.go +++ b/subcommand/server-acl-init/command.go @@ -4,19 +4,17 @@ import ( "context" "errors" "flag" + "fmt" "os" - "strings" "sync" "time" - "fmt" "github.com/hashicorp/consul-k8s/subcommand" k8sflags "github.com/hashicorp/consul-k8s/subcommand/flags" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" - apiv1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -31,26 +29,40 @@ type Command struct { flagServerLabelSelector string flagResourcePrefix string flagReplicas int - flagNamespace string + flagK8sNamespace string flagAllowDNS bool flagCreateClientToken bool flagCreateSyncToken bool + flagCreateInjectToken bool flagCreateInjectAuthMethod bool flagBindingRuleSelector string flagCreateEntLicenseToken bool flagCreateSnapshotAgentToken bool flagCreateMeshGatewayToken bool - flagLogLevel string - flagTimeout string flagConsulCACert string flagConsulTLSServerName string flagUseHTTPS bool + // Flags to support namespaces + flagEnableNamespaces bool // Use namespacing on all components + flagConsulSyncDestinationNamespace string // Consul namespace to register all catalog sync services into if not mirroring + flagEnableSyncK8SNSMirroring bool // Enables mirroring of k8s namespaces into Consul for 
catalog sync + flagSyncK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring catalog sync services + flagConsulInjectDestinationNamespace string // Consul namespace to register all injected services into if not mirroring + flagEnableInjectK8SNSMirroring bool // Enables mirroring of k8s namespaces into Consul for Connect inject + flagInjectK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring injected services + + flagLogLevel string + flagTimeout string + clientset kubernetes.Interface // cmdTimeout is cancelled when the command timeout is reached. cmdTimeout context.Context retryDuration time.Duration + // Log + Log hclog.Logger + once sync.Once help string } @@ -65,7 +77,7 @@ func (c *Command) init() { "Prefix to use for Kubernetes resources. If not set, the \"-consul\" prefix is used, where is the value set by the -release-name flag.") c.flags.IntVar(&c.flagReplicas, "expected-replicas", 1, "Number of expected Consul server replicas") - c.flags.StringVar(&c.flagNamespace, "k8s-namespace", "", + c.flags.StringVar(&c.flagK8sNamespace, "k8s-namespace", "", "Name of Kubernetes namespace where the servers are deployed") c.flags.BoolVar(&c.flagAllowDNS, "allow-dns", false, "Toggle for updating the anonymous token to allow DNS queries to work") @@ -73,8 +85,12 @@ func (c *Command) init() { "Toggle for creating a client agent token") c.flags.BoolVar(&c.flagCreateSyncToken, "create-sync-token", false, "Toggle for creating a catalog sync token") + c.flags.BoolVar(&c.flagCreateInjectToken, "create-inject-namespace-token", false, + "Toggle for creating a connect injector token. 
Only required when namespaces are enabled.") + c.flags.BoolVar(&c.flagCreateInjectAuthMethod, "create-inject-auth-method", false, + "Toggle for creating a connect inject auth method.") c.flags.BoolVar(&c.flagCreateInjectAuthMethod, "create-inject-token", false, - "Toggle for creating a connect inject token") + "Toggle for creating a connect inject auth method. Deprecated: use -create-inject-auth-method instead.") c.flags.StringVar(&c.flagBindingRuleSelector, "acl-binding-rule-selector", "", "Selector string for connectInject ACL Binding Rule") c.flags.BoolVar(&c.flagCreateEntLicenseToken, "create-enterprise-license-token", false, @@ -83,14 +99,32 @@ func (c *Command) init() { "Toggle for creating a token for the Consul snapshot agent deployment (enterprise only)") c.flags.BoolVar(&c.flagCreateMeshGatewayToken, "create-mesh-gateway-token", false, "Toggle for creating a token for a Connect mesh gateway") - c.flags.StringVar(&c.flagTimeout, "timeout", "10m", - "How long we'll try to bootstrap ACLs for before timing out, e.g. 1ms, 2s, 3m") c.flags.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", "Path to the PEM-encoded CA certificate of the Consul cluster.") c.flags.StringVar(&c.flagConsulTLSServerName, "consul-tls-server-name", "", "The server name to set as the SNI header when sending HTTPS requests to Consul.") c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, "Toggle for using HTTPS for all API calls to Consul.") + c.flags.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, + "[Enterprise Only] Enables namespaces, in either a single Consul namespace or mirrored [Enterprise only feature]") + c.flags.StringVar(&c.flagConsulSyncDestinationNamespace, "consul-sync-destination-namespace", "default", + "[Enterprise Only] Indicates which Consul namespace that catalog sync will register services into. 
If "+ + "'-enable-sync-k8s-namespace-mirroring' is true, this is not used.") + c.flags.BoolVar(&c.flagEnableSyncK8SNSMirroring, "enable-sync-k8s-namespace-mirroring", false, "[Enterprise Only] "+ + "Indicates that namespace mirroring will be used for catalog sync services.") + c.flags.StringVar(&c.flagSyncK8SNSMirroringPrefix, "sync-k8s-namespace-mirroring-prefix", "", + "[Enterprise Only] Prefix that will be added to all k8s namespaces mirrored into Consul by catalog sync "+ + "if mirroring is enabled.") + c.flags.StringVar(&c.flagConsulInjectDestinationNamespace, "consul-inject-destination-namespace", "default", + "[Enterprise Only] Indicates which Consul namespace that the Connect injector will register services into. If "+ + "'-enable-inject-k8s-namespace-mirroring' is true, this is not used.") + c.flags.BoolVar(&c.flagEnableInjectK8SNSMirroring, "enable-inject-k8s-namespace-mirroring", false, "[Enterprise Only] "+ + "Indicates that namespace mirroring will be used for Connect inject services.") + c.flags.StringVar(&c.flagInjectK8SNSMirroringPrefix, "inject-k8s-namespace-mirroring-prefix", "", + "[Enterprise Only] Prefix that will be added to all k8s namespaces mirrored into Consul by Connect inject "+ + "if mirroring is enabled.") + c.flags.StringVar(&c.flagTimeout, "timeout", "10m", + "How long we'll try to bootstrap ACLs for before timing out, e.g. 1ms, 2s, 3m") c.flags.StringVar(&c.flagLogLevel, "log-level", "info", "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ "\"debug\", \"info\", \"warn\", and \"error\".") @@ -152,13 +186,13 @@ func (c *Command) Run(args []string) int { // The context will only ever be intentionally ended by the timeout. defer cancel() - // Configure our logger. 
+ // Configure our logger level := hclog.LevelFromString(c.flagLogLevel) if level == hclog.NoLevel { c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel)) return 1 } - logger := hclog.New(&hclog.LoggerOptions{ + c.Log = hclog.New(&hclog.LoggerOptions{ Level: level, Output: os.Stderr, }) @@ -166,7 +200,7 @@ func (c *Command) Run(args []string) int { // The ClientSet might already be set if we're in a test. if c.clientset == nil { if err := c.configureKubeClient(); err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } @@ -182,7 +216,7 @@ func (c *Command) Run(args []string) int { // because in older versions of consul-helm it wasn't labeled with // component: server. We also can't drop that label because it's required // for targeting the right server Pods. - statefulset, err := c.clientset.AppsV1().StatefulSets(c.flagNamespace).Get(ssName, metav1.GetOptions{}) + statefulset, err := c.clientset.AppsV1().StatefulSets(c.flagK8sNamespace).Get(ssName, metav1.GetOptions{}) if err != nil { return err } @@ -191,35 +225,42 @@ func (c *Command) Run(args []string) int { } return fmt.Errorf("rollout is in progress (CurrentRevision=%s UpdateRevision=%s)", statefulset.Status.CurrentRevision, statefulset.Status.UpdateRevision) - }, logger) + }) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } // Check if we've already been bootstrapped. 
bootTokenSecretName := c.withPrefix("bootstrap-acl-token") - bootstrapToken, err := c.getBootstrapToken(logger, bootTokenSecretName) + bootstrapToken, err := c.getBootstrapToken(bootTokenSecretName) if err != nil { - logger.Error(fmt.Sprintf("Unexpected error looking for preexisting bootstrap Secret: %s", err)) + c.Log.Error(fmt.Sprintf("Unexpected error looking for preexisting bootstrap Secret: %s", err)) return 1 } + var updateServerPolicy bool if bootstrapToken != "" { - logger.Info(fmt.Sprintf("ACLs already bootstrapped - retrieved bootstrap token from Secret %q", bootTokenSecretName)) + c.Log.Info(fmt.Sprintf("ACLs already bootstrapped - retrieved bootstrap token from Secret %q", bootTokenSecretName)) + + // Mark that we should update the server ACL policy in case + // there are namespace related config changes. Because of the + // organization of the server token creation code, the policy + // otherwise won't be updated. + updateServerPolicy = true } else { - logger.Info("No bootstrap token from previous installation found, continuing on to bootstrapping") - bootstrapToken, err = c.bootstrapServers(logger, bootTokenSecretName, scheme) + c.Log.Info("No bootstrap token from previous installation found, continuing on to bootstrapping") + bootstrapToken, err = c.bootstrapServers(bootTokenSecretName, scheme) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } // For all of the next operations we'll need a Consul client. 
- serverPods, err := c.getConsulServers(logger, 1, scheme) + serverPods, err := c.getConsulServers(1, scheme) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } serverAddr := serverPods[0].Addr @@ -233,75 +274,157 @@ func (c *Command) Run(args []string) int { }, }) if err != nil { - logger.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) + c.Log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) return 1 } + // With the addition of namespaces, the ACL policies associated + // with the server tokens may need to be updated if Enterprise Consul + // users upgrade to 1.7+. This updates the policy if the bootstrap + // token had previously existed, which signals a potential config change. + if updateServerPolicy { + _, err = c.setServerPolicy(consulClient) + if err != nil { + c.Log.Error("Error updating the server ACL policy", "err", err) + return 1 + } + } + + // If namespaces are enabled, to allow cross-Consul-namespace permissions + // for services from k8s, the Consul `default` namespace needs a policy + // allowing service discovery in all namespaces. Each namespace that is + // created by consul-k8s components (this bootstrapper, catalog sync or + // connect inject) needs to reference this policy on namespace creation + // to finish the cross namespace permission setup. 
+ if c.flagEnableNamespaces { + policyTmpl := api.ACLPolicy{ + Name: "cross-namespace-policy", + Description: "Policy to allow permissions to cross Consul namespaces for k8s services", + Rules: crossNamespaceRules, + } + err := c.untilSucceeds(fmt.Sprintf("creating %s policy", policyTmpl.Name), + func() error { + return c.createOrUpdateACLPolicy(policyTmpl, consulClient) + }) + if err != nil { + c.Log.Error("Error creating or updating the cross namespace policy", "err", err) + return 1 + } + + // Apply this to the PolicyDefaults for the Consul `default` namespace + aclConfig := api.NamespaceACLConfig{ + PolicyDefaults: []api.ACLLink{ + {Name: policyTmpl.Name}, + }, + } + consulNamespace := api.Namespace{ + Name: "default", + ACLs: &aclConfig, + } + _, _, err = consulClient.Namespaces().Update(&consulNamespace, &api.WriteOptions{}) + if err != nil { + c.Log.Error("Error updating the default namespace to include the cross namespace policy", "err", err) + return 1 + } + } + if c.flagCreateClientToken { - err := c.createACL("client", agentRules, consulClient, logger) + agentRules, err := c.agentRules() + if err != nil { + c.Log.Error("Error templating client agent rules", "err", err) + return 1 + } + + err = c.createACL("client", agentRules, consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } if c.flagAllowDNS { - err := c.configureDNSPolicies(logger, consulClient) + err := c.configureDNSPolicies(consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } if c.flagCreateSyncToken { - err := c.createACL("catalog-sync", syncRules, consulClient, logger) + syncRules, err := c.syncRules() + if err != nil { + c.Log.Error("Error templating sync rules", "err", err) + return 1 + } + + err = c.createACL("catalog-sync", syncRules, consulClient) + if err != nil { + c.Log.Error(err.Error()) + return 1 + } + } + + if c.flagCreateInjectToken { + injectRules, err := c.injectRules() + if err != nil { + 
c.Log.Error("Error templating inject rules", "err", err) + return 1 + } + + err = c.createACL("connect-inject", injectRules, consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } if c.flagCreateEntLicenseToken { - err := c.createACL("enterprise-license", entLicenseRules, consulClient, logger) + err := c.createACL("enterprise-license", entLicenseRules, consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } if c.flagCreateSnapshotAgentToken { - err := c.createACL("client-snapshot-agent", snapshotAgentRules, consulClient, logger) + err := c.createACL("client-snapshot-agent", snapshotAgentRules, consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } if c.flagCreateMeshGatewayToken { - err := c.createACL("mesh-gateway", meshGatewayRules, consulClient, logger) + meshGatewayRules, err := c.meshGatewayRules() + if err != nil { + c.Log.Error("Error templating dns rules", "err", err) + return 1 + } + + err = c.createACL("mesh-gateway", meshGatewayRules, consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } if c.flagCreateInjectAuthMethod { - err := c.configureConnectInject(logger, consulClient) + err := c.configureConnectInject(consulClient) if err != nil { - logger.Error(err.Error()) + c.Log.Error(err.Error()) return 1 } } - logger.Info("server-acl-init completed successfully") + c.Log.Info("server-acl-init completed successfully") return 0 } // getBootstrapToken returns the existing bootstrap token if there is one by // reading the Kubernetes Secret with name secretName. // If there is no bootstrap token yet, then it returns an empty string (not an error). 
-func (c *Command) getBootstrapToken(logger hclog.Logger, secretName string) (string, error) { - secret, err := c.clientset.CoreV1().Secrets(c.flagNamespace).Get(secretName, metav1.GetOptions{}) +func (c *Command) getBootstrapToken(secretName string) (string, error) { + secret, err := c.clientset.CoreV1().Secrets(c.flagK8sNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { if k8serrors.IsNotFound(err) { return "", nil @@ -327,436 +450,17 @@ func (c *Command) configureKubeClient() error { return nil } -// getConsulServers returns n Consul server pods with their http addresses. -// If there are less server pods than 'n' then the function will wait. -func (c *Command) getConsulServers(logger hclog.Logger, n int, scheme string) ([]podAddr, error) { - var serverPods *apiv1.PodList - err := c.untilSucceeds("discovering Consul server pods", - func() error { - var err error - serverPods, err = c.clientset.CoreV1().Pods(c.flagNamespace).List(metav1.ListOptions{LabelSelector: c.flagServerLabelSelector}) - if err != nil { - return err - } - - if len(serverPods.Items) == 0 { - return fmt.Errorf("no server pods with labels %q found", c.flagServerLabelSelector) - } - - if len(serverPods.Items) < n { - return fmt.Errorf("found %d servers, require %d", len(serverPods.Items), n) - } - - for _, pod := range serverPods.Items { - if pod.Status.PodIP == "" { - return fmt.Errorf("pod %s has no IP", pod.Name) - } - } - return nil - }, logger) - if err != nil { - return nil, err - } - - var podAddrs []podAddr - for _, pod := range serverPods.Items { - var httpPort int32 - for _, p := range pod.Spec.Containers[0].Ports { - if p.Name == scheme { - httpPort = p.ContainerPort - } - } - if httpPort == 0 { - return nil, fmt.Errorf("pod %s has no port labeled '%s'", pod.Name, scheme) - } - addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, httpPort) - podAddrs = append(podAddrs, podAddr{ - Name: pod.Name, - Addr: addr, - }) - } - return podAddrs, nil -} - -// bootstrapServers bootstraps 
ACLs and ensures each server has an ACL token. -func (c *Command) bootstrapServers(logger hclog.Logger, bootTokenSecretName, scheme string) (string, error) { - serverPods, err := c.getConsulServers(logger, c.flagReplicas, scheme) - if err != nil { - return "", err - } - logger.Info(fmt.Sprintf("Found %d Consul server Pods", len(serverPods))) - - // Pick the first pod to connect to for bootstrapping and set up connection. - firstServerAddr := serverPods[0].Addr - consulClient, err := api.NewClient(&api.Config{ - Address: firstServerAddr, - Scheme: scheme, - TLSConfig: api.TLSConfig{ - Address: c.flagConsulTLSServerName, - CAFile: c.flagConsulCACert, - }, - }) - if err != nil { - return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) - } - - // Call bootstrap ACLs API. - var bootstrapToken []byte - var unrecoverableErr error - err = c.untilSucceeds("bootstrapping ACLs - PUT /v1/acl/bootstrap", - func() error { - bootstrapResp, _, err := consulClient.ACL().Bootstrap() - if err == nil { - bootstrapToken = []byte(bootstrapResp.SecretID) - return nil - } - - // Check if already bootstrapped. - if strings.Contains(err.Error(), "Unexpected response code: 403") { - unrecoverableErr = errors.New("ACLs already bootstrapped but the ACL token was not written to a Kubernetes secret." + - " We can't proceed because the bootstrap token is lost." + - " You must reset ACLs.") - return nil - } - - if isNoLeaderErr(err) { - // Return a more descriptive error in the case of no leader - // being elected. - return fmt.Errorf("no leader elected: %s", err) - } - return err - }, logger) - if unrecoverableErr != nil { - return "", unrecoverableErr - } - if err != nil { - return "", err - } - - // Write bootstrap token to a Kubernetes secret. 
- err = c.untilSucceeds(fmt.Sprintf("writing bootstrap Secret %q", bootTokenSecretName), - func() error { - secret := &apiv1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: bootTokenSecretName, - }, - Data: map[string][]byte{ - "token": bootstrapToken, - }, - } - _, err := c.clientset.CoreV1().Secrets(c.flagNamespace).Create(secret) - return err - }, logger) - if err != nil { - return "", err - } - - // Override our original client with a new one that has the bootstrap token - // set. - consulClient, err = api.NewClient(&api.Config{ - Address: firstServerAddr, - Scheme: scheme, - Token: string(bootstrapToken), - TLSConfig: api.TLSConfig{ - Address: c.flagConsulTLSServerName, - CAFile: c.flagConsulCACert, - }, - }) - if err != nil { - return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) - } - - // Create new tokens for each server and apply them. - if err := c.setServerTokens(logger, consulClient, serverPods, string(bootstrapToken), scheme); err != nil { - return "", err - } - return string(bootstrapToken), nil -} - -// setServerTokens creates policies and associated ACL token for each server -// and then provides the token to the server. -func (c *Command) setServerTokens(logger hclog.Logger, consulClient *api.Client, - serverPods []podAddr, bootstrapToken, scheme string) error { - // Create agent policy. - agentPolicy := api.ACLPolicy{ - Name: "agent-token", - Description: "Agent Token Policy", - Rules: agentRules, - } - err := c.untilSucceeds("creating agent policy - PUT /v1/acl/policy", - func() error { - _, _, err := consulClient.ACL().PolicyCreate(&agentPolicy, nil) - if isPolicyExistsErr(err, agentPolicy.Name) { - logger.Info(fmt.Sprintf("Policy %q already exists", agentPolicy.Name)) - return nil - } - return err - }, logger) - if err != nil { - return err - } - - // Create agent token for each server agent. 
- var serverTokens []api.ACLToken - for _, pod := range serverPods { - var token *api.ACLToken - err := c.untilSucceeds(fmt.Sprintf("creating server token for %s - PUT /v1/acl/token", pod.Name), - func() error { - tokenReq := api.ACLToken{ - Description: fmt.Sprintf("Server Token for %s", pod.Name), - Policies: []*api.ACLTokenPolicyLink{{Name: agentPolicy.Name}}, - } - var err error - token, _, err = consulClient.ACL().TokenCreate(&tokenReq, nil) - return err - }, logger) - if err != nil { - return err - } - serverTokens = append(serverTokens, *token) - } - - // Pass out agent tokens to servers. - for i, pod := range serverPods { - // We create a new client for each server because we need to call each - // server specifically. - serverClient, err := api.NewClient(&api.Config{ - Address: pod.Addr, - Scheme: scheme, - Token: bootstrapToken, - TLSConfig: api.TLSConfig{ - Address: c.flagConsulTLSServerName, - CAFile: c.flagConsulCACert, - }, - }) - if err != nil { - return fmt.Errorf(" creating Consul client for address %q: %s", pod.Addr, err) - } - podName := pod.Name - - // Update token. - err = c.untilSucceeds(fmt.Sprintf("updating server token for %s - PUT /v1/agent/token/agent", podName), - func() error { - _, err := serverClient.Agent().UpdateAgentACLToken(serverTokens[i].SecretID, nil) - return err - }, logger) - if err != nil { - return err - } - } - return nil -} - -// createACL creates a policy with rules and name, creates an ACL token for that -// policy and then writes the token to a Kubernetes secret. -func (c *Command) createACL(name, rules string, consulClient *api.Client, logger hclog.Logger) error { - // Check if the secret already exists, if so, we assume the ACL has already been created. 
- secretName := c.withPrefix(name + "-acl-token") - _, err := c.clientset.CoreV1().Secrets(c.flagNamespace).Get(secretName, metav1.GetOptions{}) - if err == nil { - logger.Info(fmt.Sprintf("Secret %q already exists", secretName)) - return nil - } - - // Create policy with the given rules. - policyTmpl := api.ACLPolicy{ - Name: fmt.Sprintf("%s-token", name), - Description: fmt.Sprintf("%s Token Policy", name), - Rules: rules, - } - err = c.untilSucceeds(fmt.Sprintf("creating %s policy", policyTmpl.Name), - func() error { - _, _, err := consulClient.ACL().PolicyCreate(&policyTmpl, &api.WriteOptions{}) - if isPolicyExistsErr(err, policyTmpl.Name) { - logger.Info(fmt.Sprintf("Policy %q already exists", policyTmpl.Name)) - return nil - } - return err - }, logger) - if err != nil { - return err - } - - // Create token for the policy. - tokenTmpl := api.ACLToken{ - Description: fmt.Sprintf("%s Token", name), - Policies: []*api.ACLTokenPolicyLink{{Name: policyTmpl.Name}}, - } - var token string - err = c.untilSucceeds(fmt.Sprintf("creating token for policy %s", policyTmpl.Name), - func() error { - createdToken, _, err := consulClient.ACL().TokenCreate(&tokenTmpl, &api.WriteOptions{}) - if err == nil { - token = createdToken.SecretID - } - return err - }, logger) - if err != nil { - return err - } - - // Write token to a Kubernetes secret. - return c.untilSucceeds(fmt.Sprintf("writing Secret for token %s", policyTmpl.Name), - func() error { - secret := &apiv1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - Data: map[string][]byte{ - "token": []byte(token), - }, - } - _, err := c.clientset.CoreV1().Secrets(c.flagNamespace).Create(secret) - return err - }, logger) -} - -// configureDNSPolicies sets up policies and tokens so that Consul DNS will -// work. 
-func (c *Command) configureDNSPolicies(logger hclog.Logger, consulClient *api.Client) error { - // Create policy for the anonymous token - dnsPolicy := api.ACLPolicy{ - Name: "dns-policy", - Description: "DNS Policy", - Rules: dnsRules, - } - - err := c.untilSucceeds("creating dns policy - PUT /v1/acl/policy", - func() error { - _, _, err := consulClient.ACL().PolicyCreate(&dnsPolicy, nil) - if isPolicyExistsErr(err, dnsPolicy.Name) { - logger.Info(fmt.Sprintf("Policy %q already exists", dnsPolicy.Name)) - return nil - } - return err - }, logger) - if err != nil { - return err - } - - // Create token to get sent to TokenUpdate - aToken := api.ACLToken{ - AccessorID: "00000000-0000-0000-0000-000000000002", - Policies: []*api.ACLTokenPolicyLink{{Name: dnsPolicy.Name}}, - } - - // Update anonymous token to include this policy - return c.untilSucceeds("updating anonymous token with DNS policy", - func() error { - _, _, err := consulClient.ACL().TokenUpdate(&aToken, &api.WriteOptions{}) - return err - }, logger) -} - -// configureConnectInject sets up auth methods so that connect injection will -// work. -func (c *Command) configureConnectInject(logger hclog.Logger, consulClient *api.Client) error { - // First, check if there's already an acl binding rule. If so, then this - // work is already done. 
- authMethodName := c.withPrefix("k8s-auth-method") - var existingRules []*api.ACLBindingRule - err := c.untilSucceeds(fmt.Sprintf("listing binding rules for auth method %s", authMethodName), - func() error { - var err error - existingRules, _, err = consulClient.ACL().BindingRuleList(authMethodName, nil) - return err - }, logger) - if err != nil { - return err - } - if len(existingRules) > 0 { - logger.Info(fmt.Sprintf("Binding rule for %s already exists", authMethodName)) - return nil - } - - var kubeSvc *apiv1.Service - err = c.untilSucceeds("getting kubernetes service IP", - func() error { - var err error - kubeSvc, err = c.clientset.CoreV1().Services("default").Get("kubernetes", metav1.GetOptions{}) - return err - }, logger) - if err != nil { - return err - } - - // Get the Secret name for the auth method ServiceAccount. - var authMethodServiceAccount *apiv1.ServiceAccount - saName := c.withPrefix("connect-injector-authmethod-svc-account") - err = c.untilSucceeds(fmt.Sprintf("getting %s ServiceAccount", saName), - func() error { - var err error - authMethodServiceAccount, err = c.clientset.CoreV1().ServiceAccounts(c.flagNamespace).Get(saName, metav1.GetOptions{}) - return err - }, logger) - if err != nil { - return err - } - - // ServiceAccounts always have a secret name. The secret - // contains the JWT token. - saSecretName := authMethodServiceAccount.Secrets[0].Name - - // Get the secret that will contain the ServiceAccount JWT token. - var saSecret *apiv1.Secret - err = c.untilSucceeds(fmt.Sprintf("getting %s Secret", saSecretName), - func() error { - var err error - saSecret, err = c.clientset.CoreV1().Secrets(c.flagNamespace).Get(saSecretName, metav1.GetOptions{}) - return err - }, logger) - if err != nil { - return err - } - - // Now we're ready to set up Consul's auth method. 
- authMethodTmpl := api.ACLAuthMethod{ - Name: authMethodName, - Description: "Kubernetes AuthMethod", - Type: "kubernetes", - Config: map[string]interface{}{ - "Host": fmt.Sprintf("https://%s:443", kubeSvc.Spec.ClusterIP), - "CACert": string(saSecret.Data["ca.crt"]), - "ServiceAccountJWT": string(saSecret.Data["token"]), - }, - } - var authMethod *api.ACLAuthMethod - err = c.untilSucceeds(fmt.Sprintf("creating auth method %s", authMethodTmpl.Name), - func() error { - var err error - authMethod, _, err = consulClient.ACL().AuthMethodCreate(&authMethodTmpl, &api.WriteOptions{}) - return err - }, logger) - if err != nil { - return err - } - - // Create the binding rule. - abr := api.ACLBindingRule{ - Description: "Kubernetes binding rule", - AuthMethod: authMethod.Name, - BindType: api.BindingRuleBindTypeService, - BindName: "${serviceaccount.name}", - Selector: c.flagBindingRuleSelector, - } - return c.untilSucceeds(fmt.Sprintf("creating acl binding rule for %s", authMethodTmpl.Name), - func() error { - _, _, err := consulClient.ACL().BindingRuleCreate(&abr, nil) - return err - }, logger) -} - // untilSucceeds runs op until it returns a nil error. // If c.cmdTimeout is cancelled it will exit. -func (c *Command) untilSucceeds(opName string, op func() error, logger hclog.Logger) error { +func (c *Command) untilSucceeds(opName string, op func() error) error { for { err := op() if err == nil { - logger.Info(fmt.Sprintf("Success: %s", opName)) + c.Log.Info(fmt.Sprintf("Success: %s", opName)) break } - logger.Error(fmt.Sprintf("Failure: %s", opName), "err", err) - logger.Info("Retrying in " + c.retryDuration.String()) + c.Log.Error(fmt.Sprintf("Failure: %s", opName), "err", err) + c.Log.Info("Retrying in " + c.retryDuration.String()) // Wait on either the retry duration (in which case we continue) or the // overall command timeout. 
select { @@ -781,30 +485,6 @@ func (c *Command) withPrefix(resource string) string { return fmt.Sprintf("%s-consul-%s", c.flagReleaseName, resource) } -// isNoLeaderErr returns true if err is due to trying to call the -// bootstrap ACLs API when there is no leader elected. -func isNoLeaderErr(err error) bool { - return err != nil && strings.Contains(err.Error(), "Unexpected response code: 500") && - strings.Contains(err.Error(), "The ACL system is currently in legacy mode.") -} - -// isPolicyExistsErr returns true if err is due to trying to call the -// policy create API when the policy already exists. -func isPolicyExistsErr(err error, policyName string) bool { - return err != nil && - strings.Contains(err.Error(), "Unexpected response code: 500") && - strings.Contains(err.Error(), fmt.Sprintf("Invalid Policy: A Policy with Name %q already exists", policyName)) -} - -// podAddr is a convenience struct for passing around pod names and -// addresses for Consul servers. -type podAddr struct { - // Name is the name of the pod. - Name string - // Addr is in the form ":". - Addr string -} - const synopsis = "Initialize ACLs on Consul servers and other components." const help = ` Usage: consul-k8s server-acl-init [options] @@ -815,51 +495,3 @@ Usage: consul-k8s server-acl-init [options] and safe to run multiple times. ` - -// ACL rules -const agentRules = `node_prefix "" { - policy = "write" -} -service_prefix "" { - policy = "read" -}` - -const dnsRules = `node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "read" -}` - -const syncRules = `node_prefix "" { - policy = "read" -} -node "k8s-sync" { - policy = "write" -} -service_prefix "" { - policy = "write" -}` - -const snapshotAgentRules = `acl = "write" -key "consul-snapshot/lock" { - policy = "write" -} -session_prefix "" { - policy = "write" -} -service "consul-snapshot" { - policy = "write" -}` - -// This assumes users are using the default name for the service, i.e. -// "mesh-gateway". 
-const meshGatewayRules = `service_prefix "" { - policy = "read" -} - -service "mesh-gateway" { - policy = "write" -}` - -const entLicenseRules = `operator = "write"` diff --git a/subcommand/server-acl-init/command_ent_test.go b/subcommand/server-acl-init/command_ent_test.go new file mode 100644 index 0000000000..f287f278f9 --- /dev/null +++ b/subcommand/server-acl-init/command_ent_test.go @@ -0,0 +1,583 @@ +// +build enterprise + +package serveraclinit + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes/fake" +) + +// Test the auth method and acl binding rule created when namespaces are enabled +// and there's a single consul destination namespace. +func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { + t.Parallel() + + consulDestNamespaces := []string{"default", "destination"} + for _, consulDestNamespace := range consulDestNamespaces { + t.Run(consulDestNamespace, func(tt *testing.T) { + k8s, testAgent := completeEnterpriseSetup(tt, resourcePrefix, ns) + defer testAgent.Stop() + setUpK8sServiceAccount(tt, k8s) + require := require.New(tt) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + args := []string{ + "-server-label-selector=component=server,app=consul,release=" + releaseName, + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-expected-replicas=1", + "-create-inject-auth-method", + "-enable-namespaces", + "-consul-inject-destination-namespace", consulDestNamespace, + "-acl-binding-rule-selector=serviceaccount.name!=default", + } + + responseCode := cmd.Run(args) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consul, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + Token: bootToken, + }) + require.NoError(err) + + // Ensure there's only one 
auth method. + namespaceQuery := &api.QueryOptions{ + Namespace: consulDestNamespace, + } + methods, _, err := consul.ACL().AuthMethodList(namespaceQuery) + require.NoError(err) + require.Len(methods, 1) + + // Check the ACL auth method is created in the expected namespace. + authMethodName := releaseName + "-consul-k8s-auth-method" + actMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, namespaceQuery) + require.NoError(err) + require.NotNil(actMethod) + require.Equal("kubernetes", actMethod.Type) + require.Equal("Kubernetes AuthMethod", actMethod.Description) + require.NotContains(actMethod.Config, "MapNamespaces") + require.NotContains(actMethod.Config, "ConsulNamespacePrefix") + + // Check the binding rule is as expected. + rules, _, err := consul.ACL().BindingRuleList(authMethodName, namespaceQuery) + require.NoError(err) + require.Len(rules, 1) + actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, namespaceQuery) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) + + // Check that the default namespace got an attached ACL policy + defNamespace, _, err := consul.Namespaces().Read("default", &api.QueryOptions{}) + require.NoError(err) + require.NotNil(defNamespace) + require.NotNil(defNamespace.ACLs) + require.Len(defNamespace.ACLs.PolicyDefaults, 1) + require.Equal("cross-namespace-policy", defNamespace.ACLs.PolicyDefaults[0].Name) + + if consulDestNamespace != "default" { + // Check that only one namespace was created besides the + // already existing `default` namespace + namespaces, _, err := consul.Namespaces().List(&api.QueryOptions{}) + require.NoError(err) + require.Len(namespaces, 2) + + // Check the created namespace properties + actNamespace, _, err := 
consul.Namespaces().Read(consulDestNamespace, &api.QueryOptions{}) + require.NoError(err) + require.NotNil(actNamespace) + require.Equal(consulDestNamespace, actNamespace.Name) + require.Equal("Auto-generated by the ACL bootstrapping process", actNamespace.Description) + require.NotNil(actNamespace.ACLs) + require.Len(actNamespace.ACLs.PolicyDefaults, 1) + require.Equal("cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name) + require.Contains(actNamespace.Meta, "external-source") + require.Equal("kubernetes", actNamespace.Meta["external-source"]) + } + }) + } +} + +// Test the auth method and acl binding rule created when namespaces are enabled +// and we're mirroring namespaces. +func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + MirroringPrefix string + ExtraFlags []string + }{ + "no prefix": { + MirroringPrefix: "", + ExtraFlags: nil, + }, + "with prefix": { + MirroringPrefix: "prefix-", + ExtraFlags: nil, + }, + "with destination namespace flag": { + MirroringPrefix: "", + // Mirroring takes precedence over this flag so it should have no + // effect. + ExtraFlags: []string{"-consul-inject-destination-namespace=dest"}, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + k8s, testAgent := completeEnterpriseSetup(t, resourcePrefix, ns) + defer testAgent.Stop() + setUpK8sServiceAccount(tt, k8s) + require := require.New(tt) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + args := []string{ + "-server-label-selector=component=server,app=consul,release=" + releaseName, + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-expected-replicas=1", + "-create-inject-auth-method", + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix", c.MirroringPrefix, + "-acl-binding-rule-selector=serviceaccount.name!=default", + } + args = append(args, c.ExtraFlags...) 
+ responseCode := cmd.Run(args) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consul, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + Token: bootToken, + }) + require.NoError(err) + + // Check the ACL auth method is as expected. + authMethodName := releaseName + "-consul-k8s-auth-method" + method, _, err := consul.ACL().AuthMethodRead(authMethodName, nil) + require.NoError(err) + require.NotNil(method, authMethodName+" not found") + require.Equal("kubernetes", method.Type) + require.Equal("Kubernetes AuthMethod", method.Description) + require.Contains(method.Config, "MapNamespaces") + require.Contains(method.Config, "ConsulNamespacePrefix") + require.Equal(true, method.Config["MapNamespaces"]) + require.Equal(c.MirroringPrefix, method.Config["ConsulNamespacePrefix"]) + + // Check the binding rule is as expected. + rules, _, err := consul.ACL().BindingRuleList(authMethodName, nil) + require.NoError(err) + require.Len(rules, 1) + actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, nil) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) + }) + } +} + +// Test that ACL policies get updated if namespaces config changes. 
+func TestRun_ACLPolicyUpdates(t *testing.T) { + t.Parallel() + + k8sNamespaceFlags := []string{"default", "other"} + for _, k8sNamespaceFlag := range k8sNamespaceFlags { + t.Run(k8sNamespaceFlag, func(t *testing.T) { + k8s, testAgent := completeEnterpriseSetup(t, resourcePrefix, k8sNamespaceFlag) + defer testAgent.Stop() + require := require.New(t) + + ui := cli.NewMockUi() + firstRunArgs := []string{ + "-server-label-selector=component=server,app=consul,release=" + releaseName, + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace", k8sNamespaceFlag, + "-create-client-token", + "-allow-dns", + "-create-mesh-gateway-token", + "-create-sync-token", + "-create-inject-namespace-token", + "-create-snapshot-agent-token", + "-create-enterprise-license-token", + "-expected-replicas=1", + } + // Our second run, we're going to update from namespaces disabled to + // namespaces enabled with a single destination ns. + secondRunArgs := append(firstRunArgs, + "-enable-namespaces", + "-consul-sync-destination-namespace=sync", + "-consul-inject-destination-namespace=dest") + + // Run the command first to populate the policies. + cmd := Command{ + UI: ui, + clientset: k8s, + } + responseCode := cmd.Run(firstRunArgs) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + bootToken := getBootToken(t, k8s, resourcePrefix, k8sNamespaceFlag) + consul, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + Token: bootToken, + }) + require.NoError(err) + + // Check that the expected policies were created. + firstRunExpectedPolicies := []string{ + "dns-policy", + "client-token", + "catalog-sync-token", + "connect-inject-token", + "mesh-gateway-token", + "client-snapshot-agent-token", + "enterprise-license-token", + } + policies, _, err := consul.ACL().PolicyList(nil) + require.NoError(err) + + // Check that we have the right number of policies. 
 The actual + // policies will have two more than expected because of the + // global management and namespace management policies that + // are automatically created, the latter in consul-ent v1.7+. + require.Equal(len(firstRunExpectedPolicies), len(policies)-2) + + // Collect the actual policies into a map to make it easier to assert + // on their existence and contents. + actualPolicies := make(map[string]string) + for _, p := range policies { + policy, _, err := consul.ACL().PolicyRead(p.ID, nil) + require.NoError(err) + actualPolicies[p.Name] = policy.Rules + } + for _, expected := range firstRunExpectedPolicies { + actRules, ok := actualPolicies[expected] + require.True(ok, "Did not find policy %s", expected) + // We assert that the policy doesn't have any namespace config + // in it because later that's what we're using to test that it + // got updated. + require.NotContains(actRules, "namespace") + } + + // Re-run the command with namespace flags. The policies should be updated. + // NOTE: We're redefining the command so that the old flag values are + // reset. + cmd = Command{ + UI: ui, + clientset: k8s, + } + responseCode = cmd.Run(secondRunArgs) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + // Check that the policies have all been updated. + secondRunExpectedPolicies := []string{ + "dns-policy", + "client-token", + "catalog-sync-token", + "connect-inject-token", + "mesh-gateway-token", + "client-snapshot-agent-token", + "enterprise-license-token", + "cross-namespace-policy", + } + policies, _, err = consul.ACL().PolicyList(nil) + require.NoError(err) + + // Check that we have the right number of policies. The actual + // policies will have two more than expected because of the + // global management and namespace management policies that + // are automatically created, the latter in consul-ent v1.7+. 
+ require.Equal(len(secondRunExpectedPolicies), len(policies)-2) + + // Collect the actual policies into a map to make it easier to assert + // on their existence and contents. + actualPolicies = make(map[string]string) + for _, p := range policies { + policy, _, err := consul.ACL().PolicyRead(p.ID, nil) + require.NoError(err) + actualPolicies[p.Name] = policy.Rules + } + for _, expected := range secondRunExpectedPolicies { + actRules, ok := actualPolicies[expected] + require.True(ok, "Did not find policy %s", expected) + + switch expected { + case "connect-inject-token": + // The connect inject token doesn't have namespace config, + // but does change to operator:write from an empty string. + require.Contains(actRules, "operator = \"write\"") + case "client-snapshot-agent-token", "enterprise-license-token": + // The snapshot agent and enterprise license tokens shouldn't change. + require.NotContains(actRules, "namespace") + default: + // Assert that the policies have the word namespace in them. This + // tests that they were updated. The actual contents are tested + // in rules_test.go. + require.Contains(actRules, "namespace") + } + } + }) + } +} + +// Test that re-running the commands results in auth method and binding rules +// being updated. +func TestRun_ConnectInject_Updates(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + // Args for first run of command. + FirstRunArgs []string + // Args for second run of command. + SecondRunArgs []string + // Expected namespace for the auth method. + AuthMethodExpectedNS string + // If true, we expect MapNamespaces to be set on the auth method + // config. + AuthMethodExpectMapNamespacesConfig bool + // If AuthMethodExpectMapNamespacesConfig is true, we will assert + // that the ConsulNamespacePrefix field on the auth method config + // is set to this. + AuthMethodExpectedNamespacePrefixConfig string + // Expected namespace for the binding rule. 
+ BindingRuleExpectedNS string + }{ + "no ns => mirroring ns, no prefix": { + FirstRunArgs: nil, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "default", + }, + "no ns => mirroring ns, prefix": { + FirstRunArgs: nil, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + }, + "no ns => single dest ns": { + FirstRunArgs: nil, + SecondRunArgs: []string{ + "-create-inject-auth-method", + "-enable-namespaces", + "-consul-inject-destination-namespace=dest", + }, + AuthMethodExpectedNS: "dest", + AuthMethodExpectMapNamespacesConfig: false, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "dest", + }, + "mirroring ns => single dest ns": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-consul-inject-destination-namespace=dest", + }, + AuthMethodExpectedNS: "dest", + AuthMethodExpectMapNamespacesConfig: false, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "dest", + }, + "single dest ns => mirroring ns": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-consul-inject-destination-namespace=dest", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + 
BindingRuleExpectedNS: "default", + }, + "mirroring ns (no prefix) => mirroring ns (no prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "default", + }, + "mirroring ns => mirroring ns (same prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + }, + "mirroring ns (no prefix) => mirroring ns (prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + }, + "mirroring ns (prefix) => mirroring ns (no prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + AuthMethodExpectedNS: "default", 
+ AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "default", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + k8s, testAgent := completeEnterpriseSetup(tt, resourcePrefix, ns) + defer testAgent.Stop() + setUpK8sServiceAccount(tt, k8s) + + ui := cli.NewMockUi() + defaultArgs := []string{ + "-server-label-selector=component=server,app=consul,release=" + releaseName, + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-expected-replicas=1", + "-create-inject-auth-method", + } + + // First run. NOTE: we don't assert anything here since we've + // tested these results in other tests. What we care about here + // is the result after the second run. + cmd := Command{ + UI: ui, + clientset: k8s, + } + responseCode := cmd.Run(append(defaultArgs, c.FirstRunArgs...)) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + // Second run. + // NOTE: We're redefining the command so that the old flag values are + // reset. + cmd = Command{ + UI: ui, + clientset: k8s, + } + responseCode = cmd.Run(append(defaultArgs, c.SecondRunArgs...)) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + // Now check that everything is as expected. + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consul, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + Token: bootToken, + }) + require.NoError(err) + + // Check the ACL auth method is as expected. 
+ authMethodName := releaseName + "-consul-k8s-auth-method" + method, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{ + Namespace: c.AuthMethodExpectedNS, + }) + require.NoError(err) + require.NotNil(method, authMethodName+" not found") + if c.AuthMethodExpectMapNamespacesConfig { + require.Contains(method.Config, "MapNamespaces") + require.Contains(method.Config, "ConsulNamespacePrefix") + require.Equal(true, method.Config["MapNamespaces"]) + require.Equal(c.AuthMethodExpectedNamespacePrefixConfig, method.Config["ConsulNamespacePrefix"]) + } else { + require.NotContains(method.Config, "MapNamespaces") + require.NotContains(method.Config, "ConsulNamespacePrefix") + } + + // Check the binding rule is as expected. + rules, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{ + Namespace: c.BindingRuleExpectedNS, + }) + require.NoError(err) + require.Len(rules, 1) + }) + } +} + +// Set up test consul agent and kubernetes cluster. +func completeEnterpriseSetup(t *testing.T, prefix string, k8sNamespace string) (*fake.Clientset, *testutil.TestServer) { + k8s := fake.NewSimpleClientset() + + svr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + }) + require.NoError(t, err) + + createTestK8SResources(t, k8s, svr.HTTPAddr, prefix, "http", k8sNamespace) + + return k8s, svr +} diff --git a/subcommand/server-acl-init/command_test.go b/subcommand/server-acl-init/command_test.go index 5b9fee4cad..12a0a4c829 100644 --- a/subcommand/server-acl-init/command_test.go +++ b/subcommand/server-acl-init/command_test.go @@ -4,16 +4,7 @@ import ( "crypto/x509" "encoding/base64" "fmt" - "github.com/hashicorp/consul/agent" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/tlsutil" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" "io/ioutil" - appv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/client-go/kubernetes/fake" "math/rand" "net" "net/http" @@ -23,6 +14,16 @@ import ( "strconv" "testing" "time" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/tlsutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + appv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" ) var ns = "default" @@ -92,7 +93,7 @@ func TestRun_Defaults(t *testing.T) { require.Equal(0, responseCode, ui.ErrorWriter.String()) // Test that the bootstrap kube secret is created. - bootToken := getBootToken(t, k8s, resourcePrefix) + bootToken := getBootToken(t, k8s, resourcePrefix, ns) // Check that it has the right policies. consul := testAgent.Client() @@ -160,6 +161,19 @@ func TestRun_Tokens(t *testing.T) { TokenName: "catalog-sync", SecretName: "my-prefix-catalog-sync-acl-token", }, + "connect-inject-namespace token -release-name": { + TokenFlag: "-create-inject-namespace-token", + ResourcePrefixFlag: "", + ReleaseNameFlag: "release-name", + TokenName: "connect-inject", + SecretName: "release-name-consul-connect-inject-acl-token", + }, + "connect-inject-namespace token -resource-prefix": { + TokenFlag: "-create-inject-namespace-token", + ResourcePrefixFlag: "my-prefix", + TokenName: "connect-inject", + SecretName: "my-prefix-connect-inject-acl-token", + }, "enterprise-license token -release-name": { TokenFlag: "-create-enterprise-license-token", ResourcePrefixFlag: "", @@ -222,7 +236,7 @@ func TestRun_Tokens(t *testing.T) { require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the client policy was created. 
- bootToken := getBootToken(t, k8s, prefix) + bootToken := getBootToken(t, k8s, prefix, ns) consul := testAgent.Client() policies, _, err := consul.ACL().PolicyList(&api.QueryOptions{Token: bootToken}) require.NoError(err) @@ -286,7 +300,7 @@ func TestRun_AllowDNS(t *testing.T) { require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the dns policy was created. - bootToken := getBootToken(t, k8s, resourcePrefix) + bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul := testAgent.Client() policies, _, err := consul.ACL().PolicyList(&api.QueryOptions{Token: bootToken}) require.NoError(err) @@ -317,104 +331,156 @@ func TestRun_AllowDNS(t *testing.T) { }) } -func TestRun_ConnectInjectToken(t *testing.T) { +func TestRun_ConnectInjectAuthMethod(t *testing.T) { t.Parallel() - k8s, testAgent := completeSetup(t, resourcePrefix) - defer testAgent.Shutdown() - require := require.New(t) - - // Create Kubernetes Service. - _, err := k8s.CoreV1().Services(ns).Create(&v1.Service{ - Spec: v1.ServiceSpec{ - ClusterIP: "1.2.3.4", + cases := map[string]struct { + AuthMethodFlag string + }{ + "-create-inject-token flag": { + AuthMethodFlag: "-create-inject-token", }, - ObjectMeta: metav1.ObjectMeta{ - Name: "kubernetes", + "-create-inject-auth-method flag": { + AuthMethodFlag: "-create-inject-auth-method", }, - }) - require.NoError(err) + } + for testName, c := range cases { + t.Run(testName, func(tt *testing.T) { - // Create ServiceAccount for the injector that the helm chart creates. 
- _, err = k8s.CoreV1().ServiceAccounts(ns).Create(&v1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourcePrefix + "-connect-injector-authmethod-svc-account", - }, - Secrets: []v1.ObjectReference{ - { - Name: resourcePrefix + "-connect-injector-authmethod-svc-accohndbv", - }, - }, - }) - require.NoError(err) + k8s, testAgent := completeSetup(tt, resourcePrefix) + defer testAgent.Shutdown() + caCert, jwtToken := setUpK8sServiceAccount(tt, k8s) + require := require.New(tt) - // Create the ServiceAccount Secret. - caCertBytes, err := base64.StdEncoding.DecodeString(serviceAccountCACert) - require.NoError(err) - tokenBytes, err := base64.StdEncoding.DecodeString(serviceAccountToken) - require.NoError(err) - _, err = k8s.CoreV1().Secrets(ns).Create(&v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourcePrefix + "-connect-injector-authmethod-svc-accohndbv", - }, - Data: map[string][]byte{ - "ca.crt": caCertBytes, - "token": tokenBytes, - }, - }) - require.NoError(err) + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + bindingRuleSelector := "serviceaccount.name!=default" + cmdArgs := []string{ + "-server-label-selector=component=server,app=consul,release=" + releaseName, + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-expected-replicas=1", + "-acl-binding-rule-selector=" + bindingRuleSelector, + } + cmdArgs = append(cmdArgs, c.AuthMethodFlag) + responseCode := cmd.Run(cmdArgs) + require.Equal(0, responseCode, ui.ErrorWriter.String()) - // Run the command. - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, + // Check that the auth method was created. 
+ bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consul := testAgent.Client() + authMethodName := resourcePrefix + "-k8s-auth-method" + authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, + &api.QueryOptions{Token: bootToken}) + require.NoError(err) + require.Contains(authMethod.Config, "Host") + require.Equal(authMethod.Config["Host"], "https://1.2.3.4:443") + require.Contains(authMethod.Config, "CACert") + require.Equal(authMethod.Config["CACert"], caCert) + require.Contains(authMethod.Config, "ServiceAccountJWT") + require.Equal(authMethod.Config["ServiceAccountJWT"], jwtToken) + + // Check that the binding rule was created. + rules, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{Token: bootToken}) + require.NoError(err) + require.Len(rules, 1) + require.Equal("service", string(rules[0].BindType)) + require.Equal("${serviceaccount.name}", rules[0].BindName) + require.Equal(bindingRuleSelector, rules[0].Selector) + + // Test that if the same command is re-run it doesn't error. + t.Run("retried", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + }) + }) } - cmd.init() - bindingRuleSelector := "serviceaccount.name!=default" - cmdArgs := []string{ +} + +// Test that ACL binding rules are updated if the rule selector changes. 
+func TestRun_BindingRuleUpdates(t *testing.T) { + t.Parallel() + k8s, agent := completeSetup(t, resourcePrefix) + setUpK8sServiceAccount(t, k8s) + defer agent.Shutdown() + require := require.New(t) + consul := agent.Client() + + ui := cli.NewMockUi() + commonArgs := []string{ "-server-label-selector=component=server,app=consul,release=" + releaseName, "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-expected-replicas=1", - "-create-inject-token", - "-acl-binding-rule-selector=" + bindingRuleSelector, + "-create-inject-auth-method", } - responseCode := cmd.Run(cmdArgs) + firstRunArgs := append(commonArgs, + "-acl-binding-rule-selector=serviceaccount.name!=default", + ) + // Our second run, we change the binding rule selector. + secondRunArgs := append(commonArgs, + "-acl-binding-rule-selector=serviceaccount.name!=changed", + ) + + // Run the command first to populate the binding rule. + cmd := Command{ + UI: ui, + clientset: k8s, + } + responseCode := cmd.Run(firstRunArgs) require.Equal(0, responseCode, ui.ErrorWriter.String()) - // Check that the auth method was created. - bootToken := getBootToken(t, k8s, resourcePrefix) - consul := testAgent.Client() - authMethodName := resourcePrefix + "-k8s-auth-method" - authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, - &api.QueryOptions{Token: bootToken}) - require.NoError(err) - require.Contains(authMethod.Config, "Host") - require.Equal(authMethod.Config["Host"], "https://1.2.3.4:443") - require.Contains(authMethod.Config, "CACert") - require.Equal(authMethod.Config["CACert"], string(caCertBytes)) - require.Contains(authMethod.Config, "ServiceAccountJWT") - require.Equal(authMethod.Config["ServiceAccountJWT"], string(tokenBytes)) - - // Check that the binding rule was created. 
- rules, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{Token: bootToken}) - require.NoError(err) - require.Len(rules, 1) - require.Equal("service", string(rules[0].BindType)) - require.Equal("${serviceaccount.name}", rules[0].BindName) - require.Equal(bindingRuleSelector, rules[0].Selector) + // Validate the binding rule. + { + queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} + authMethodName := releaseName + "-consul-k8s-auth-method" + rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) + require.NoError(err) + require.Len(rules, 1) + actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) + } - // Test that if the same command is re-run it doesn't error. - t.Run("retried", func(t *testing.T) { - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, - } - cmd.init() - responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) - }) + // Re-run the command with namespace flags. The policies should be updated. + // NOTE: We're redefining the command so that the old flag values are + // reset. + cmd = Command{ + UI: ui, + clientset: k8s, + } + responseCode = cmd.Run(secondRunArgs) + require.Equal(0, responseCode, ui.ErrorWriter.String()) + + // Check the binding rule is changed expected. 
+ { + queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} + authMethodName := releaseName + "-consul-k8s-auth-method" + rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) + require.NoError(err) + require.Len(rules, 1) + actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=changed", actRule.Selector) + } } // Test that if the server pods aren't available at first that bootstrap @@ -527,7 +593,7 @@ func TestRun_DelayedServerPods(t *testing.T) { } // Test that the bootstrap kube secret is created. - getBootToken(t, k8s, resourcePrefix) + getBootToken(t, k8s, resourcePrefix, ns) // Test that the expected API calls were made. require.Equal([]APICall{ @@ -683,7 +749,7 @@ func TestRun_InProgressDeployment(t *testing.T) { } // Test that the bootstrap kube secret is created. - getBootToken(t, k8s, resourcePrefix) + getBootToken(t, k8s, resourcePrefix, ns) // Test that the expected API calls were made. require.Equal([]APICall{ @@ -829,7 +895,7 @@ func TestRun_NoLeader(t *testing.T) { } // Test that the bootstrap kube secret is created. - getBootToken(t, k8s, resourcePrefix) + getBootToken(t, k8s, resourcePrefix, ns) // Test that the expected API calls were made. require.Equal([]APICall{ @@ -1118,7 +1184,12 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { // Test that the expected API calls were made. require.Equal([]APICall{ - // We only expect the calls for creating client tokens. + // We only expect the calls for creating client tokens + // and updating the server policy. 
+ { + "PUT", + "/v1/acl/policy", + }, { "PUT", "/v1/acl/policy", @@ -1179,7 +1250,7 @@ func TestRun_HTTPS(t *testing.T) { a.Start() defer a.Shutdown() - createTestK8SResources(t, k8s, a, resourcePrefix, "https") + createTestK8SResources(t, k8s, a.HTTPAddr(), resourcePrefix, "https", ns) // Run the command. ui := cli.NewMockUi() @@ -1218,21 +1289,21 @@ func completeSetup(t *testing.T, prefix string) (*fake.Clientset, *agent.TestAge enabled = true }`) - createTestK8SResources(t, k8s, a, prefix, "http") + createTestK8SResources(t, k8s, a.HTTPAddr(), prefix, "http", ns) return k8s, a } // Create test k8s resources (server pods and server stateful set) -func createTestK8SResources(t *testing.T, k8s *fake.Clientset, a *agent.TestAgent, prefix, scheme string) { +func createTestK8SResources(t *testing.T, k8s *fake.Clientset, consulHTTPAddr, prefix, scheme, k8sNamespace string) { require := require.New(t) - consulURL, err := url.Parse("http://" + a.HTTPAddr()) + consulURL, err := url.Parse("http://" + consulHTTPAddr) require.NoError(err) port, err := strconv.Atoi(consulURL.Port()) require.NoError(err) // Create Consul server Pod. - _, err = k8s.CoreV1().Pods(ns).Create(&v1.Pod{ + _, err = k8s.CoreV1().Pods(k8sNamespace).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: prefix + "-server-0", Labels: map[string]string{ @@ -1261,7 +1332,7 @@ func createTestK8SResources(t *testing.T, k8s *fake.Clientset, a *agent.TestAgen require.NoError(err) // Create Consul server Statefulset. - _, err = k8s.AppsV1().StatefulSets(ns).Create(&appv1.StatefulSet{ + _, err = k8s.AppsV1().StatefulSets(k8sNamespace).Create(&appv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: prefix + "-server", Labels: map[string]string{ @@ -1280,8 +1351,8 @@ func createTestK8SResources(t *testing.T, k8s *fake.Clientset, a *agent.TestAgen // getBootToken gets the bootstrap token from the Kubernetes secret. It will // cause a test failure if the Secret doesn't exist or is malformed. 
-func getBootToken(t *testing.T, k8s *fake.Clientset, prefix string) string { - bootstrapSecret, err := k8s.CoreV1().Secrets(ns).Get(fmt.Sprintf("%s-bootstrap-acl-token", prefix), metav1.GetOptions{}) +func getBootToken(t *testing.T, k8s *fake.Clientset, prefix string, k8sNamespace string) string { + bootstrapSecret, err := k8s.CoreV1().Secrets(k8sNamespace).Get(fmt.Sprintf("%s-bootstrap-acl-token", prefix), metav1.GetOptions{}) require.NoError(t, err) require.NotNil(t, bootstrapSecret) bootToken, ok := bootstrapSecret.Data["token"] @@ -1346,5 +1417,52 @@ func generateServerCerts(t *testing.T) (string, string, string, func()) { return caFile.Name(), certFile.Name(), certKeyFile.Name(), cleanupFunc } +// setUpK8sServiceAccount creates a Service Account for the connect injector. +// This Service Account would normally automatically be created by Kubernetes +// when the injector deployment is created. It returns the Service Account +// CA Cert and JWT token. +func setUpK8sServiceAccount(t *testing.T, k8s *fake.Clientset) (string, string) { + // Create Kubernetes Service. + _, err := k8s.CoreV1().Services(ns).Create(&v1.Service{ + Spec: v1.ServiceSpec{ + ClusterIP: "1.2.3.4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "kubernetes", + }, + }) + require.NoError(t, err) + + // Create ServiceAccount for the injector that the helm chart creates. + _, err = k8s.CoreV1().ServiceAccounts(ns).Create(&v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-connect-injector-authmethod-svc-account", + }, + Secrets: []v1.ObjectReference{ + { + Name: resourcePrefix + "-connect-injector-authmethod-svc-account", + }, + }, + }) + require.NoError(t, err) + + // Create the ServiceAccount Secret. 
+ caCertBytes, err := base64.StdEncoding.DecodeString(serviceAccountCACert) + require.NoError(t, err) + tokenBytes, err := base64.StdEncoding.DecodeString(serviceAccountToken) + require.NoError(t, err) + _, err = k8s.CoreV1().Secrets(ns).Create(&v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-connect-injector-authmethod-svc-account", + }, + Data: map[string][]byte{ + "ca.crt": caCertBytes, + "token": tokenBytes, + }, + }) + require.NoError(t, err) + return string(caCertBytes), string(tokenBytes) +} + var serviceAccountCACert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDekNDQWZPZ0F3SUJBZ0lRS3pzN05qbDlIczZYYzhFWG91MjVoekFOQmdrcWhraUc5dzBCQVFzRkFEQXYKTVMwd0t3WURWUVFERXlRMU9XVTJaR00wTVMweU1EaG1MVFF3T1RVdFlUSTRPUzB4Wm1NM01EQmhZekZqWXpndwpIaGNOTVRrd05qQTNNVEF4TnpNeFdoY05NalF3TmpBMU1URXhOek14V2pBdk1TMHdLd1lEVlFRREV5UTFPV1UyClpHTTBNUzB5TURobUxUUXdPVFV0WVRJNE9TMHhabU0zTURCaFl6RmpZemd3Z2dFaU1BMEdDU3FHU0liM0RRRUIKQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURaakh6d3FvZnpUcEdwYzBNZElDUzdldXZmdWpVS0UzUEMvYXBmREFnQgo0anpFRktBNzgvOStLVUd3L2MvMFNIZVNRaE4rYThnd2xIUm5BejFOSmNmT0lYeTRkd2VVdU9rQWlGeEg4cGh0CkVDd2tlTk83ejhEb1Y4Y2VtaW5DUkhHamFSbW9NeHBaN2cycFpBSk5aZVB4aTN5MWFOa0ZBWGU5Z1NVU2RqUloKUlhZa2E3d2gyQU85azJkbEdGQVlCK3Qzdld3SjZ0d2pHMFR0S1FyaFlNOU9kMS9vTjBFMDFMekJjWnV4a04xawo4Z2ZJSHk3Yk9GQ0JNMldURURXLzBhQXZjQVByTzhETHFESis2TWpjM3I3K3psemw4YVFzcGIwUzA4cFZ6a2k1CkR6Ly84M2t5dTBwaEp1aWo1ZUI4OFY3VWZQWHhYRi9FdFY2ZnZyTDdNTjRmQWdNQkFBR2pJekFoTUE0R0ExVWQKRHdFQi93UUVBd0lDQkRBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCdgpRc2FHNnFsY2FSa3RKMHpHaHh4SjUyTm5SVjJHY0lZUGVOM1p2MlZYZTNNTDNWZDZHMzJQVjdsSU9oangzS21BCi91TWg2TmhxQnpzZWtrVHowUHVDM3dKeU0yT0dvblZRaXNGbHF4OXNGUTNmVTJtSUdYQ2Ezd0M4ZS9xUDhCSFMKdzcvVmVBN2x6bWozVFFSRS9XMFUwWkdlb0F4bjliNkp0VDBpTXVjWXZQMGhYS1RQQldsbnpJaWphbVU1MHIyWQo3aWEwNjVVZzJ4VU41RkxYL3Z4T0EzeTRyanBraldvVlFjdTFwOFRaclZvTTNkc0dGV3AxMGZETVJpQUhUdk9ICloyM2pHdWs2cm45RFVIQzJ4UGozd0NUbWQ4U0dFSm9WMzFub0pWNWRWZVE5MHd1c1h6M3ZURzdmaWNLbnZIRlMKeHRyNVBTd0gxRHVzWWZ
WYUdIMk8KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" var serviceAccountToken = "ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklpSjkuZXlKcGMzTWlPaUpyZFdKbGNtNWxkR1Z6TDNObGNuWnBZMlZoWTJOdmRXNTBJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5dVlXMWxjM0JoWTJVaU9pSmtaV1poZFd4MElpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJhV05sWVdOamIzVnVkQzl6WldOeVpYUXVibUZ0WlNJNkltdG9ZV3RwTFdGeVlXTm9ibWxrTFdOdmJuTjFiQzFqYjI1dVpXTjBMV2x1YW1WamRHOXlMV0YxZEdodFpYUm9iMlF0YzNaakxXRmpZMjlvYm1SaWRpSXNJbXQxWW1WeWJtVjBaWE11YVc4dmMyVnlkbWxqWldGalkyOTFiblF2YzJWeWRtbGpaUzFoWTJOdmRXNTBMbTVoYldVaU9pSnJhR0ZyYVMxaGNtRmphRzVwWkMxamIyNXpkV3d0WTI5dWJtVmpkQzFwYm1wbFkzUnZjaTFoZFhSb2JXVjBhRzlrTFhOMll5MWhZMk52ZFc1MElpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJhV05sWVdOamIzVnVkQzl6WlhKMmFXTmxMV0ZqWTI5MWJuUXVkV2xrSWpvaU4yVTVOV1V4TWprdFpUUTNNeTB4TVdVNUxUaG1ZV0V0TkRJd01UQmhPREF3TVRJeUlpd2ljM1ZpSWpvaWMzbHpkR1Z0T25ObGNuWnBZMlZoWTJOdmRXNTBPbVJsWm1GMWJIUTZhMmhoYTJrdFlYSmhZMmh1YVdRdFkyOXVjM1ZzTFdOdmJtNWxZM1F0YVc1cVpXTjBiM0l0WVhWMGFHMWxkR2h2WkMxemRtTXRZV05qYjNWdWRDSjkuWWk2M01NdHpoNU1CV0tLZDNhN2R6Q0pqVElURTE1aWtGeV9UbnBka19Bd2R3QTlKNEFNU0dFZUhONXZXdEN1dUZqb19sTUpxQkJQSGtLMkFxYm5vRlVqOW01Q29wV3lxSUNKUWx2RU9QNGZVUS1SYzBXMVBfSmpVMXJaRVJIRzM5YjVUTUxnS1BRZ3V5aGFpWkVKNkNqVnRtOXdVVGFncmdpdXFZVjJpVXFMdUY2U1lObTZTckt0a1BTLWxxSU8tdTdDMDZ3Vms1bTV1cXdJVlFOcFpTSUNfNUxzNWFMbXlaVTNuSHZILVY3RTNIbUJoVnlaQUI3NmpnS0IwVHlWWDFJT3NrdDlQREZhck50VTNzdVp5Q2p2cUMtVUpBNnNZZXlTZTRkQk5Lc0tsU1o2WXV4VVVtbjFSZ3YzMllNZEltbnNXZzhraGYtekp2cWdXazdCNUVB" diff --git a/subcommand/server-acl-init/connect_inject.go b/subcommand/server-acl-init/connect_inject.go new file mode 100644 index 0000000000..08c009540a --- /dev/null +++ b/subcommand/server-acl-init/connect_inject.go @@ -0,0 +1,219 @@ +package serveraclinit + +import ( + "errors" + "fmt" + + "github.com/hashicorp/consul/api" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// configureConnectInject sets up auth methods so that connect injection will +// work. 
+func (c *Command) configureConnectInject(consulClient *api.Client) error { + + authMethodName := c.withPrefix("k8s-auth-method") + + // If not running namespaces, check if there's already an auth method. + // This means no changes need to be made to it. Binding rules should + // still be checked in case a user has updated their config. + var createAuthMethod bool + if !c.flagEnableNamespaces { + // Check if an auth method exists with the given name + err := c.untilSucceeds(fmt.Sprintf("checking if %s auth method exists", authMethodName), + func() error { + am, _, err := consulClient.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{}) + // This call returns nil if an AuthMethod does + // not exist with that name. This means we will + // need to create one. + if err == nil && am == nil { + createAuthMethod = true + } + return err + }) + if err != nil { + return err + } + } + + // If namespaces are enabled, a namespace configuration change may need + // the auth method to be updated (as with a different mirroring prefix) + // or a new auth method created (if a new destination namespace is specified). + if c.flagEnableNamespaces || createAuthMethod { + // Create the auth method template. This requires calls to the + // kubernetes environment. + authMethodTmpl, err := c.createAuthMethodTmpl(authMethodName) + if err != nil { + return err + } + + // Set up the auth method in the specific namespace if not mirroring + // If namespaces and mirroring are enabled, this is not necessary because + // the auth method will fall back to being created in the Consul `default` + // namespace automatically, as is necessary for mirroring. + // Note: if the config changes, an auth method will be created in the + // correct namespace, but the old auth method will not be removed. 
+ writeOptions := api.WriteOptions{} + if c.flagEnableNamespaces && !c.flagEnableInjectK8SNSMirroring { + writeOptions.Namespace = c.flagConsulInjectDestinationNamespace + + if c.flagConsulInjectDestinationNamespace != "default" { + // If not the default namespace, check if it exists, creating it + // if necessary. The Consul namespace must exist for the AuthMethod + // to be created there. + err = c.untilSucceeds(fmt.Sprintf("checking or creating namespace %s", + c.flagConsulInjectDestinationNamespace), + func() error { + err := c.checkAndCreateNamespace(c.flagConsulInjectDestinationNamespace, consulClient) + return err + }) + if err != nil { + return err + } + } + } + + err = c.untilSucceeds(fmt.Sprintf("creating auth method %s", authMethodTmpl.Name), + func() error { + var err error + // `AuthMethodCreate` will also be able to update an existing + // AuthMethod based on the name provided. This means that any namespace + // configuration changes will correctly update the AuthMethod. + _, _, err = consulClient.ACL().AuthMethodCreate(&authMethodTmpl, &writeOptions) + return err + }) + if err != nil { + return err + } + } + + // Create the binding rule. + abr := api.ACLBindingRule{ + Description: "Kubernetes binding rule", + AuthMethod: authMethodName, + BindType: api.BindingRuleBindTypeService, + BindName: "${serviceaccount.name}", + Selector: c.flagBindingRuleSelector, + } + + // Binding rule list api call query options + queryOptions := api.QueryOptions{} + + // Add a namespace if appropriate + // If namespaces and mirroring are enabled, this is not necessary because + // the binding rule will fall back to being created in the Consul `default` + // namespace automatically, as is necessary for mirroring. 
+ if c.flagEnableNamespaces && !c.flagEnableInjectK8SNSMirroring { + abr.Namespace = c.flagConsulInjectDestinationNamespace + queryOptions.Namespace = c.flagConsulInjectDestinationNamespace + } + + var existingRules []*api.ACLBindingRule + err := c.untilSucceeds(fmt.Sprintf("listing binding rules for auth method %s", authMethodName), + func() error { + var err error + existingRules, _, err = consulClient.ACL().BindingRuleList(authMethodName, &queryOptions) + return err + }) + if err != nil { + return err + } + + // If the binding rule already exists, update it + // This updates the binding rule any time the acl bootstrapping + // command is rerun, which is a bit of extra overhead, but is + // necessary to pick up any potential config changes. + if len(existingRules) > 0 { + // Find the policy that matches our name and description + // and that's the ID we need + for _, existingRule := range existingRules { + if existingRule.BindName == abr.BindName && existingRule.Description == abr.Description { + abr.ID = existingRule.ID + } + } + + // This will only happen if there are existing policies + // for this auth method, but none that match the binding + // rule set up here in the bootstrap method. 
+ if abr.ID == "" { + return errors.New("Unable to find a matching ACL binding rule to update") + } + + err = c.untilSucceeds(fmt.Sprintf("updating acl binding rule for %s", authMethodName), + func() error { + _, _, err := consulClient.ACL().BindingRuleUpdate(&abr, nil) + return err + }) + } else { + // Otherwise create the binding rule + err = c.untilSucceeds(fmt.Sprintf("creating acl binding rule for %s", authMethodName), + func() error { + _, _, err := consulClient.ACL().BindingRuleCreate(&abr, nil) + return err + }) + } + return err +} + +func (c *Command) createAuthMethodTmpl(authMethodName string) (api.ACLAuthMethod, error) { + var kubeSvc *apiv1.Service + err := c.untilSucceeds("getting kubernetes service IP", + func() error { + var err error + kubeSvc, err = c.clientset.CoreV1().Services("default").Get("kubernetes", metav1.GetOptions{}) + return err + }) + if err != nil { + return api.ACLAuthMethod{}, err + } + + // Get the Secret name for the auth method ServiceAccount. + var authMethodServiceAccount *apiv1.ServiceAccount + saName := c.withPrefix("connect-injector-authmethod-svc-account") + err = c.untilSucceeds(fmt.Sprintf("getting %s ServiceAccount", saName), + func() error { + var err error + authMethodServiceAccount, err = c.clientset.CoreV1().ServiceAccounts(c.flagK8sNamespace).Get(saName, metav1.GetOptions{}) + return err + }) + if err != nil { + return api.ACLAuthMethod{}, err + } + + // ServiceAccounts always have a secret name. The secret + // contains the JWT token. + saSecretName := authMethodServiceAccount.Secrets[0].Name + + // Get the secret that will contain the ServiceAccount JWT token. 
+ var saSecret *apiv1.Secret + err = c.untilSucceeds(fmt.Sprintf("getting %s Secret", saSecretName), + func() error { + var err error + saSecret, err = c.clientset.CoreV1().Secrets(c.flagK8sNamespace).Get(saSecretName, metav1.GetOptions{}) + return err + }) + if err != nil { + return api.ACLAuthMethod{}, err + } + + // Now we're ready to set up Consul's auth method. + authMethodTmpl := api.ACLAuthMethod{ + Name: authMethodName, + Description: "Kubernetes AuthMethod", + Type: "kubernetes", + Config: map[string]interface{}{ + "Host": fmt.Sprintf("https://%s:443", kubeSvc.Spec.ClusterIP), + "CACert": string(saSecret.Data["ca.crt"]), + "ServiceAccountJWT": string(saSecret.Data["token"]), + }, + } + + // Add options for mirroring namespaces + if c.flagEnableNamespaces && c.flagEnableInjectK8SNSMirroring { + authMethodTmpl.Config["MapNamespaces"] = true + authMethodTmpl.Config["ConsulNamespacePrefix"] = c.flagInjectK8SNSMirroringPrefix + } + + return authMethodTmpl, nil +} diff --git a/subcommand/server-acl-init/create_or_update.go b/subcommand/server-acl-init/create_or_update.go new file mode 100644 index 0000000000..1e75cde679 --- /dev/null +++ b/subcommand/server-acl-init/create_or_update.go @@ -0,0 +1,157 @@ +package serveraclinit + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/consul/api" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// createACL creates a policy with rules and name, creates an ACL token for that +// policy and then writes the token to a Kubernetes secret. +func (c *Command) createACL(name, rules string, consulClient *api.Client) error { + // Create policy with the given rules. 
+ policyTmpl := api.ACLPolicy{ + Name: fmt.Sprintf("%s-token", name), + Description: fmt.Sprintf("%s Token Policy", name), + Rules: rules, + } + err := c.untilSucceeds(fmt.Sprintf("creating %s policy", policyTmpl.Name), + func() error { + return c.createOrUpdateACLPolicy(policyTmpl, consulClient) + }) + if err != nil { + return err + } + + // Check if the secret already exists, if so, we assume the ACL has already been + // created and return. + secretName := c.withPrefix(name + "-acl-token") + _, err = c.clientset.CoreV1().Secrets(c.flagK8sNamespace).Get(secretName, metav1.GetOptions{}) + if err == nil { + c.Log.Info(fmt.Sprintf("Secret %q already exists", secretName)) + return nil + } + + // Create token for the policy if the secret did not exist previously. + tokenTmpl := api.ACLToken{ + Description: fmt.Sprintf("%s Token", name), + Policies: []*api.ACLTokenPolicyLink{{Name: policyTmpl.Name}}, + } + var token string + err = c.untilSucceeds(fmt.Sprintf("creating token for policy %s", policyTmpl.Name), + func() error { + createdToken, _, err := consulClient.ACL().TokenCreate(&tokenTmpl, &api.WriteOptions{}) + if err == nil { + token = createdToken.SecretID + } + return err + }) + if err != nil { + return err + } + + // Write token to a Kubernetes secret. 
+ return c.untilSucceeds(fmt.Sprintf("writing Secret for token %s", policyTmpl.Name), + func() error { + secret := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + }, + Data: map[string][]byte{ + "token": []byte(token), + }, + } + _, err := c.clientset.CoreV1().Secrets(c.flagK8sNamespace).Create(secret) + return err + }) +} + +func (c *Command) createOrUpdateACLPolicy(policy api.ACLPolicy, consulClient *api.Client) error { + // Attempt to create the ACL policy + _, _, err := consulClient.ACL().PolicyCreate(&policy, &api.WriteOptions{}) + + // With the introduction of Consul namespaces, if someone upgrades into a + // Consul version with namespace support or changes any of their namespace + // settings, the policies associated with their ACL tokens will need to be + // updated to be namespace aware. + if isPolicyExistsErr(err, policy.Name) { + if c.flagEnableNamespaces { + c.Log.Info(fmt.Sprintf("Policy %q already exists, updating", policy.Name)) + + // The policy ID is required in any PolicyUpdate call, so first we need to + // get the existing policy to extract its ID. + existingPolicies, _, err := consulClient.ACL().PolicyList(&api.QueryOptions{}) + if err != nil { + return err + } + + // Find the policy that matches our name and description + // and that's the ID we need + for _, existingPolicy := range existingPolicies { + if existingPolicy.Name == policy.Name && existingPolicy.Description == policy.Description { + policy.ID = existingPolicy.ID + } + } + + // This shouldn't happen, because we're looking for a policy + // only after we've hit a `Policy already exists` error. 
+ if policy.ID == "" { + return errors.New("Unable to find existing ACL policy") + } + + // Update the policy now that we've found its ID + _, _, err = consulClient.ACL().PolicyUpdate(&policy, &api.WriteOptions{}) + return err + } else { + c.Log.Info(fmt.Sprintf("Policy %q already exists, skipping update", policy.Name)) + return nil + } + } + return err +} + +func (c *Command) checkAndCreateNamespace(ns string, consulClient *api.Client) error { + // Check if the Consul namespace exists + namespaceInfo, _, err := consulClient.Namespaces().Read(ns, nil) + if err != nil { + return err + } + + // If not, create it + if namespaceInfo == nil { + // Create the ACLs config for the cross-Consul-namespace + // default policy that needs to be attached + aclConfig := api.NamespaceACLConfig{ + PolicyDefaults: []api.ACLLink{ + {Name: "cross-namespace-policy"}, + }, + } + + consulNamespace := api.Namespace{ + Name: ns, + Description: "Auto-generated by the ACL bootstrapping process", + ACLs: &aclConfig, + Meta: map[string]string{"external-source": "kubernetes"}, + } + + _, _, err = consulClient.Namespaces().Create(&consulNamespace, nil) + if err != nil { + return err + } + c.Log.Info("created consul namespace", "name", consulNamespace.Name) + } + + return nil +} + +// isPolicyExistsErr returns true if err is due to trying to call the +// policy create API when the policy already exists. 
+func isPolicyExistsErr(err error, policyName string) bool { + return err != nil && + strings.Contains(err.Error(), "Unexpected response code: 500") && + strings.Contains(err.Error(), fmt.Sprintf("Invalid Policy: A Policy with Name %q already exists", policyName)) +} diff --git a/subcommand/server-acl-init/dns.go b/subcommand/server-acl-init/dns.go new file mode 100644 index 0000000000..6477ac0bea --- /dev/null +++ b/subcommand/server-acl-init/dns.go @@ -0,0 +1,43 @@ +package serveraclinit + +import ( + "github.com/hashicorp/consul/api" +) + +// configureDNSPolicies sets up policies and tokens so that Consul DNS will +// work. +func (c *Command) configureDNSPolicies(consulClient *api.Client) error { + dnsRules, err := c.dnsRules() + if err != nil { + c.Log.Error("Error templating dns rules", "err", err) + return err + } + + // Create policy for the anonymous token + dnsPolicy := api.ACLPolicy{ + Name: "dns-policy", + Description: "DNS Policy", + Rules: dnsRules, + } + + err = c.untilSucceeds("creating dns policy - PUT /v1/acl/policy", + func() error { + return c.createOrUpdateACLPolicy(dnsPolicy, consulClient) + }) + if err != nil { + return err + } + + // Create token to get sent to TokenUpdate + aToken := api.ACLToken{ + AccessorID: "00000000-0000-0000-0000-000000000002", + Policies: []*api.ACLTokenPolicyLink{{Name: dnsPolicy.Name}}, + } + + // Update anonymous token to include this policy + return c.untilSucceeds("updating anonymous token with DNS policy", + func() error { + _, _, err := consulClient.ACL().TokenUpdate(&aToken, &api.WriteOptions{}) + return err + }) +} diff --git a/subcommand/server-acl-init/rules.go b/subcommand/server-acl-init/rules.go new file mode 100644 index 0000000000..0bec8238b8 --- /dev/null +++ b/subcommand/server-acl-init/rules.go @@ -0,0 +1,176 @@ +package serveraclinit + +import ( + "bytes" + "strings" + "text/template" +) + +type rulesData struct { + EnableNamespaces bool + ConsulSyncDestinationNamespace string + 
EnableSyncK8SNSMirroring bool + SyncK8SNSMirroringPrefix string +} + +const snapshotAgentRules = `acl = "write" +key "consul-snapshot/lock" { + policy = "write" +} +session_prefix "" { + policy = "write" +} +service "consul-snapshot" { + policy = "write" +}` + +const entLicenseRules = `operator = "write"` + +const crossNamespaceRules = `namespace_prefix "" { + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } +} ` + +func (c *Command) agentRules() (string, error) { + agentRulesTpl := ` + node_prefix "" { + policy = "write" + } +{{- if .EnableNamespaces }} +namespace_prefix "" { +{{- end }} + service_prefix "" { + policy = "read" + } +{{- if .EnableNamespaces }} +} +{{- end }} +` + + return c.renderRules(agentRulesTpl) +} + +func (c *Command) dnsRules() (string, error) { + // DNS rules need to have access to all namespaces + // to be able to resolve services in any namespace. + dnsRulesTpl := ` +{{- if .EnableNamespaces }} +namespace_prefix "" { +{{- end }} + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } +{{- if .EnableNamespaces }} +} +{{- end }} +` + + return c.renderRules(dnsRulesTpl) +} + +// This assumes users are using the default name for the service, i.e. +// "mesh-gateway". +func (c *Command) meshGatewayRules() (string, error) { + // Mesh gateways can only act as a proxy for services + // that its ACL token has access to. So, in the case of + // Consul namespaces, it needs access to all namespaces. 
+ meshGatewayRulesTpl := ` + agent_prefix "" { + policy = "read" + } +{{- if .EnableNamespaces }} +namespace "default" { +{{- end }} + service "mesh-gateway" { + policy = "write" + } +{{- if .EnableNamespaces }} +} +namespace_prefix "" { +{{- end }} + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } +{{- if .EnableNamespaces }} +} +{{- end }} +` + + return c.renderRules(meshGatewayRulesTpl) +} + +func (c *Command) syncRules() (string, error) { + syncRulesTpl := ` + node "k8s-sync" { + policy = "write" + } +{{- if .EnableNamespaces }} +operator = "write" +{{- if .EnableSyncK8SNSMirroring }} +namespace_prefix "{{ .SyncK8SNSMirroringPrefix }}" { +{{- else }} +namespace "{{ .ConsulSyncDestinationNamespace }}" { +{{- end }} +{{- end }} + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "write" + } +{{- if .EnableNamespaces }} +} +{{- end }} +` + + return c.renderRules(syncRulesTpl) +} + +// This should only be set when namespaces are enabled. +func (c *Command) injectRules() (string, error) { + // The Connect injector only needs permissions to create namespaces + injectRulesTpl := ` +{{- if .EnableNamespaces }} +operator = "write" +{{- end }} +` + + return c.renderRules(injectRulesTpl) +} + +func (c *Command) renderRules(tmpl string) (string, error) { + // Check that it's a valid template + compiled, err := template.New("root").Parse(strings.TrimSpace(tmpl)) + if err != nil { + return "", err + } + + // Populate the data that will be used in the template. + // Not all templates will need all of the fields. 
+ data := rulesData{ + EnableNamespaces: c.flagEnableNamespaces, + ConsulSyncDestinationNamespace: c.flagConsulSyncDestinationNamespace, + EnableSyncK8SNSMirroring: c.flagEnableSyncK8SNSMirroring, + SyncK8SNSMirroringPrefix: c.flagSyncK8SNSMirroringPrefix, + } + + // Render the template + var buf bytes.Buffer + err = compiled.Execute(&buf, &data) + if err != nil { + // Discard possible partial results on error return + return "", err + } + + return buf.String(), nil +} diff --git a/subcommand/server-acl-init/rules_test.go b/subcommand/server-acl-init/rules_test.go new file mode 100644 index 0000000000..5135bf8c73 --- /dev/null +++ b/subcommand/server-acl-init/rules_test.go @@ -0,0 +1,299 @@ +package serveraclinit + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAgentRules(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + Expected string + }{ + { + "Namespaces are disabled", + false, + `node_prefix "" { + policy = "write" + } + service_prefix "" { + policy = "read" + }`, + }, + { + "Namespaces are enabled", + true, + `node_prefix "" { + policy = "write" + } +namespace_prefix "" { + service_prefix "" { + policy = "read" + } +}`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + } + + agentRules, err := cmd.agentRules() + + require.NoError(err) + require.Equal(tt.Expected, agentRules) + }) + } +} + +func TestDNSRules(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + Expected string + }{ + { + "Namespaces are disabled", + false, + ` + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + }`, + }, + { + "Namespaces are enabled", + true, + ` +namespace_prefix "" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } +}`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + 
require := require.New(t) + + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + } + + dnsRules, err := cmd.dnsRules() + + require.NoError(err) + require.Equal(tt.Expected, dnsRules) + }) + } +} + +func TestMeshGatewayRules(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + Expected string + }{ + { + "Namespaces are disabled", + false, + `agent_prefix "" { + policy = "read" + } + service "mesh-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + }`, + }, + { + "Namespaces are enabled", + true, + `agent_prefix "" { + policy = "read" + } +namespace "default" { + service "mesh-gateway" { + policy = "write" + } +} +namespace_prefix "" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } +}`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + } + + meshGatewayRules, err := cmd.meshGatewayRules() + + require.NoError(err) + require.Equal(tt.Expected, meshGatewayRules) + }) + } +} + +func TestSyncRules(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + ConsulSyncDestinationNamespace string + EnableSyncK8SNSMirroring bool + SyncK8SNSMirroringPrefix string + Expected string + }{ + { + "Namespaces are disabled", + false, + "sync-namespace", + true, + "prefix-", + `node "k8s-sync" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "write" + }`, + }, + { + "Namespaces are enabled, mirroring disabled", + true, + "sync-namespace", + false, + "prefix-", + `node "k8s-sync" { + policy = "write" + } +operator = "write" +namespace "sync-namespace" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "write" + } +}`, + }, + { + "Namespaces are enabled, mirroring enabled, prefix empty", + true, + "sync-namespace", + true, + "", + `node 
"k8s-sync" { + policy = "write" + } +operator = "write" +namespace_prefix "" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "write" + } +}`, + }, + { + "Namespaces are enabled, mirroring enabled, prefix defined", + true, + "sync-namespace", + true, + "prefix-", + `node "k8s-sync" { + policy = "write" + } +operator = "write" +namespace_prefix "prefix-" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "write" + } +}`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + flagConsulSyncDestinationNamespace: tt.ConsulSyncDestinationNamespace, + flagEnableSyncK8SNSMirroring: tt.EnableSyncK8SNSMirroring, + flagSyncK8SNSMirroringPrefix: tt.SyncK8SNSMirroringPrefix, + } + + syncRules, err := cmd.syncRules() + + require.NoError(err) + require.Equal(tt.Expected, syncRules) + }) + } +} + +func TestInjectRules(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + Expected string + }{ + { + "Namespaces are disabled", + false, + "", + }, + { + "Namespaces are enabled", + true, + ` +operator = "write"`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + } + + injectorRules, err := cmd.injectRules() + + require.NoError(err) + require.Equal(tt.Expected, injectorRules) + }) + } +} diff --git a/subcommand/server-acl-init/servers.go b/subcommand/server-acl-init/servers.go new file mode 100644 index 0000000000..3ae1d9fdf7 --- /dev/null +++ b/subcommand/server-acl-init/servers.go @@ -0,0 +1,258 @@ +package serveraclinit + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/consul/api" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// podAddr is a convenience struct for passing around pod names and +// addresses for Consul 
servers. +type podAddr struct { + // Name is the name of the pod. + Name string + // Addr is in the form ":". + Addr string +} + +// getConsulServers returns n Consul server pods with their http addresses. +// If there are less server pods than 'n' then the function will wait. +func (c *Command) getConsulServers(n int, scheme string) ([]podAddr, error) { + var serverPods *apiv1.PodList + err := c.untilSucceeds("discovering Consul server pods", + func() error { + var err error + serverPods, err = c.clientset.CoreV1().Pods(c.flagK8sNamespace).List(metav1.ListOptions{LabelSelector: c.flagServerLabelSelector}) + if err != nil { + return err + } + + if len(serverPods.Items) == 0 { + return fmt.Errorf("no server pods with labels %q found", c.flagServerLabelSelector) + } + + if len(serverPods.Items) < n { + return fmt.Errorf("found %d servers, require %d", len(serverPods.Items), n) + } + + for _, pod := range serverPods.Items { + if pod.Status.PodIP == "" { + return fmt.Errorf("pod %s has no IP", pod.Name) + } + } + return nil + }) + if err != nil { + return nil, err + } + + var podAddrs []podAddr + for _, pod := range serverPods.Items { + var httpPort int32 + for _, p := range pod.Spec.Containers[0].Ports { + if p.Name == scheme { + httpPort = p.ContainerPort + } + } + if httpPort == 0 { + return nil, fmt.Errorf("pod %s has no port labeled '%s'", pod.Name, scheme) + } + addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, httpPort) + podAddrs = append(podAddrs, podAddr{ + Name: pod.Name, + Addr: addr, + }) + } + return podAddrs, nil +} + +// bootstrapServers bootstraps ACLs and ensures each server has an ACL token. +func (c *Command) bootstrapServers(bootTokenSecretName, scheme string) (string, error) { + serverPods, err := c.getConsulServers(c.flagReplicas, scheme) + if err != nil { + return "", err + } + c.Log.Info(fmt.Sprintf("Found %d Consul server Pods", len(serverPods))) + + // Pick the first pod to connect to for bootstrapping and set up connection. 
+ firstServerAddr := serverPods[0].Addr + consulClient, err := api.NewClient(&api.Config{ + Address: firstServerAddr, + Scheme: scheme, + TLSConfig: api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + }, + }) + if err != nil { + return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) + } + + // Call bootstrap ACLs API. + var bootstrapToken []byte + var unrecoverableErr error + err = c.untilSucceeds("bootstrapping ACLs - PUT /v1/acl/bootstrap", + func() error { + bootstrapResp, _, err := consulClient.ACL().Bootstrap() + if err == nil { + bootstrapToken = []byte(bootstrapResp.SecretID) + return nil + } + + // Check if already bootstrapped. + if strings.Contains(err.Error(), "Unexpected response code: 403") { + unrecoverableErr = errors.New("ACLs already bootstrapped but the ACL token was not written to a Kubernetes secret." + + " We can't proceed because the bootstrap token is lost." + + " You must reset ACLs.") + return nil + } + + if isNoLeaderErr(err) { + // Return a more descriptive error in the case of no leader + // being elected. + return fmt.Errorf("no leader elected: %s", err) + } + return err + }) + if unrecoverableErr != nil { + return "", unrecoverableErr + } + if err != nil { + return "", err + } + + // Write bootstrap token to a Kubernetes secret. + err = c.untilSucceeds(fmt.Sprintf("writing bootstrap Secret %q", bootTokenSecretName), + func() error { + secret := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: bootTokenSecretName, + }, + Data: map[string][]byte{ + "token": bootstrapToken, + }, + } + _, err := c.clientset.CoreV1().Secrets(c.flagK8sNamespace).Create(secret) + return err + }) + if err != nil { + return "", err + } + + // Override our original client with a new one that has the bootstrap token + // set. 
+ consulClient, err = api.NewClient(&api.Config{ + Address: firstServerAddr, + Scheme: scheme, + Token: string(bootstrapToken), + TLSConfig: api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + }, + }) + if err != nil { + return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) + } + + // Create new tokens for each server and apply them. + if err := c.setServerTokens(consulClient, serverPods, string(bootstrapToken), scheme); err != nil { + return "", err + } + return string(bootstrapToken), nil +} + +// setServerTokens creates policies and associated ACL token for each server +// and then provides the token to the server. +func (c *Command) setServerTokens(consulClient *api.Client, + serverPods []podAddr, bootstrapToken, scheme string) error { + + agentPolicy, err := c.setServerPolicy(consulClient) + if err != nil { + return err + } + + // Create agent token for each server agent. + var serverTokens []api.ACLToken + for _, pod := range serverPods { + var token *api.ACLToken + err := c.untilSucceeds(fmt.Sprintf("creating server token for %s - PUT /v1/acl/token", pod.Name), + func() error { + tokenReq := api.ACLToken{ + Description: fmt.Sprintf("Server Token for %s", pod.Name), + Policies: []*api.ACLTokenPolicyLink{{Name: agentPolicy.Name}}, + } + var err error + token, _, err = consulClient.ACL().TokenCreate(&tokenReq, nil) + return err + }) + if err != nil { + return err + } + serverTokens = append(serverTokens, *token) + } + + // Pass out agent tokens to servers. + for i, pod := range serverPods { + // We create a new client for each server because we need to call each + // server specifically. 
+ serverClient, err := api.NewClient(&api.Config{ + Address: pod.Addr, + Scheme: scheme, + Token: bootstrapToken, + TLSConfig: api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + }, + }) + if err != nil { + return fmt.Errorf(" creating Consul client for address %q: %s", pod.Addr, err) + } + podName := pod.Name + + // Update token. + err = c.untilSucceeds(fmt.Sprintf("updating server token for %s - PUT /v1/agent/token/agent", podName), + func() error { + _, err := serverClient.Agent().UpdateAgentACLToken(serverTokens[i].SecretID, nil) + return err + }) + if err != nil { + return err + } + } + return nil +} + +func (c *Command) setServerPolicy(consulClient *api.Client) (api.ACLPolicy, error) { + agentRules, err := c.agentRules() + if err != nil { + c.Log.Error("Error templating server agent rules", "err", err) + return api.ACLPolicy{}, err + } + + // Create agent policy. + agentPolicy := api.ACLPolicy{ + Name: "agent-token", + Description: "Agent Token Policy", + Rules: agentRules, + } + err = c.untilSucceeds("creating agent policy - PUT /v1/acl/policy", + func() error { + return c.createOrUpdateACLPolicy(agentPolicy, consulClient) + }) + if err != nil { + return api.ACLPolicy{}, err + } + + return agentPolicy, nil +} + +// isNoLeaderErr returns true if err is due to trying to call the +// bootstrap ACLs API when there is no leader elected. 
+func isNoLeaderErr(err error) bool { + return err != nil && strings.Contains(err.Error(), "Unexpected response code: 500") && + strings.Contains(err.Error(), "The ACL system is currently in legacy mode.") +} diff --git a/subcommand/sync-catalog/command.go b/subcommand/sync-catalog/command.go index d9704e55f4..b9738f2145 100644 --- a/subcommand/sync-catalog/command.go +++ b/subcommand/sync-catalog/command.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/deckarep/golang-set" catalogtoconsul "github.com/hashicorp/consul-k8s/catalog/to-consul" catalogtok8s "github.com/hashicorp/consul-k8s/catalog/to-k8s" "github.com/hashicorp/consul-k8s/helper/controller" @@ -48,12 +49,22 @@ type Command struct { flagAddK8SNamespaceSuffix bool flagLogLevel string + // Flags to support namespaces + flagEnableNamespaces bool // Use namespacing on all components + flagConsulDestinationNamespace string // Consul namespace to register everything if not mirroring + flagAllowK8sNamespacesList []string // K8s namespaces to explicitly inject + flagDenyK8sNamespacesList []string // K8s namespaces to deny injection (has precedence) + flagEnableK8SNSMirroring bool // Enables mirroring of k8s namespaces into Consul + flagK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring + flagCrossNamespaceACLPolicy string // The name of the ACL policy to add to every created namespace if ACLs are enabled + consulClient *api.Client clientset kubernetes.Interface - once sync.Once - sigCh chan os.Signal - help string + once sync.Once + sigCh chan os.Signal + help string + logger hclog.Logger } func (c *Command) init() { @@ -101,13 +112,38 @@ func (c *Command) init() { c.flags.StringVar(&c.flagLogLevel, "log-level", "info", "Log verbosity level. 
Supported values (in order of detail) are \"trace\", "+ "\"debug\", \"info\", \"warn\", and \"error\".") + c.flags.Var((*flags.AppendSliceValue)(&c.flagAllowK8sNamespacesList), "allow-k8s-namespace", + "K8s namespaces to explicitly allow. May be specified multiple times.") + c.flags.Var((*flags.AppendSliceValue)(&c.flagDenyK8sNamespacesList), "deny-k8s-namespace", + "K8s namespaces to explicitly deny. Takes precedence over allow. May be specified multiple times.") + c.flags.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, + "[Enterprise Only] Enables namespaces, in either a single Consul namespace or mirrored") + c.flags.StringVar(&c.flagConsulDestinationNamespace, "consul-destination-namespace", "default", + "[Enterprise Only] Defines which Consul namespace to register all synced services into. If 'enable-namespace-mirroring' "+ + "is true, this is not used.") + c.flags.BoolVar(&c.flagEnableK8SNSMirroring, "enable-k8s-namespace-mirroring", false, "[Enterprise Only] Enables "+ + "namespace mirroring") + c.flags.StringVar(&c.flagK8SNSMirroringPrefix, "k8s-namespace-mirroring-prefix", "", + "[Enterprise Only] Prefix that will be added to all k8s namespaces mirrored into Consul if mirroring is enabled.") + c.flags.StringVar(&c.flagCrossNamespaceACLPolicy, "consul-cross-namespace-acl-policy", "", + "[Enterprise Only] Name of the ACL policy to attach to all created Consul namespaces to allow service "+ + "discovery across Consul namespaces. Only necessary if ACLs are enabled.") c.http = &flags.HTTPFlags{} c.k8s = &k8sflags.K8SFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) flags.Merge(c.flags, c.k8s.Flags()) + c.help = flags.Usage(help, c.flags) + + // Wait on an interrupt to exit. This channel must be initialized before + // Run() is called so that there are no race conditions where the channel + // is not defined. 
+ if c.sigCh == nil { + c.sigCh = make(chan os.Signal, 1) + signal.Notify(c.sigCh, os.Interrupt) + } } func (c *Command) Run(args []string) int { @@ -120,7 +156,7 @@ func (c *Command) Run(args []string) int { return 1 } - // create the clientset + // Create the k8s clientset if c.clientset == nil { config, err := subcommand.K8SConfig(c.k8s.KubeConfig()) if err != nil { @@ -145,51 +181,93 @@ func (c *Command) Run(args []string) int { } } - level := hclog.LevelFromString(c.flagLogLevel) - if level == hclog.NoLevel { - c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel)) - return 1 + // Set up logging + if c.logger == nil { + level := hclog.LevelFromString(c.flagLogLevel) + if level == hclog.NoLevel { + c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel)) + return 1 + } + c.logger = hclog.New(&hclog.LoggerOptions{ + Level: level, + Output: os.Stderr, + }) } - logger := hclog.New(&hclog.LoggerOptions{ - Level: level, - Output: os.Stderr, - }) // Get the sync interval var syncInterval time.Duration c.flagConsulWritePeriod.Merge(&syncInterval) + // Convert allow/deny lists to sets + allowSet := mapset.NewSet() + denySet := mapset.NewSet() + if c.flagK8SSourceNamespace != "" { + // For backwards compatibility, if `flagK8SSourceNamespace` is set, + // it will be the only allowed namespace + allowSet.Add(c.flagK8SSourceNamespace) + } else { + for _, allow := range c.flagAllowK8sNamespacesList { + allowSet.Add(allow) + } + for _, deny := range c.flagDenyK8sNamespacesList { + denySet.Add(deny) + } + } + c.logger.Info("K8s namespace syncing configuration", "k8s namespaces allowed to be synced", allowSet, + "k8s namespaces denied from syncing", denySet) + // Create the context we'll use to cancel everything ctx, cancelF := context.WithCancel(context.Background()) // Start the K8S-to-Consul syncer var toConsulCh chan struct{} if c.flagToConsul { + // If namespaces are enabled we need to use a new Consul API endpoint + // to list node services. 
This endpoint is only available in Consul + // 1.7+. To preserve backwards compatibility, when namespaces are not + // enabled we use a client that queries the older API endpoint. + var svcsClient catalogtoconsul.ConsulNodeServicesClient + if c.flagEnableNamespaces { + svcsClient = &catalogtoconsul.NamespacesNodeServicesClient{ + Client: c.consulClient, + } + } else { + svcsClient = &catalogtoconsul.PreNamespacesNodeServicesClient{ + Client: c.consulClient, + } + } // Build the Consul sync and start it syncer := &catalogtoconsul.ConsulSyncer{ - Client: c.consulClient, - Log: logger.Named("to-consul/sink"), - Namespace: c.flagK8SSourceNamespace, - SyncPeriod: syncInterval, - ServicePollPeriod: syncInterval * 2, - ConsulK8STag: c.flagConsulK8STag, + Client: c.consulClient, + Log: c.logger.Named("to-consul/sink"), + EnableNamespaces: c.flagEnableNamespaces, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + SyncPeriod: syncInterval, + ServicePollPeriod: syncInterval * 2, + ConsulK8STag: c.flagConsulK8STag, + ConsulNodeServicesClient: svcsClient, } go syncer.Run(ctx) // Build the controller and start it ctl := &controller.Controller{ - Log: logger.Named("to-consul/controller"), + Log: c.logger.Named("to-consul/controller"), Resource: &catalogtoconsul.ServiceResource{ - Log: logger.Named("to-consul/source"), - Client: c.clientset, - Syncer: syncer, - Namespace: c.flagK8SSourceNamespace, - ExplicitEnable: !c.flagK8SDefault, - ClusterIPSync: c.flagSyncClusterIPServices, - NodePortSync: catalogtoconsul.NodePortSyncType(c.flagNodePortSyncType), - ConsulK8STag: c.flagConsulK8STag, - ConsulServicePrefix: c.flagConsulServicePrefix, - AddK8SNamespaceSuffix: c.flagAddK8SNamespaceSuffix, + Log: c.logger.Named("to-consul/source"), + Client: c.clientset, + Syncer: syncer, + AllowK8sNamespacesSet: allowSet, + DenyK8sNamespacesSet: denySet, + ExplicitEnable: !c.flagK8SDefault, + ClusterIPSync: c.flagSyncClusterIPServices, + NodePortSync: 
catalogtoconsul.NodePortSyncType(c.flagNodePortSyncType), + ConsulK8STag: c.flagConsulK8STag, + ConsulServicePrefix: c.flagConsulServicePrefix, + AddK8SNamespaceSuffix: c.flagAddK8SNamespaceSuffix, + EnableNamespaces: c.flagEnableNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, + K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, }, } @@ -206,7 +284,7 @@ func (c *Command) Run(args []string) int { sink := &catalogtok8s.K8SSink{ Client: c.clientset, Namespace: c.flagK8SWriteNamespace, - Log: logger.Named("to-k8s/sink"), + Log: c.logger.Named("to-k8s/sink"), } source := &catalogtok8s.Source{ @@ -214,14 +292,14 @@ func (c *Command) Run(args []string) int { Domain: c.flagConsulDomain, Sink: sink, Prefix: c.flagK8SServicePrefix, - Log: logger.Named("to-k8s/source"), + Log: c.logger.Named("to-k8s/source"), ConsulK8STag: c.flagConsulK8STag, } go source.Run(ctx) // Build the controller and start it ctl := &controller.Controller{ - Log: logger.Named("to-k8s/controller"), + Log: c.logger.Named("to-k8s/controller"), Resource: sink, } @@ -244,9 +322,6 @@ func (c *Command) Run(args []string) int { } }() - // Wait on an interrupt to exit - c.sigCh = make(chan os.Signal, 1) - signal.Notify(c.sigCh, os.Interrupt) select { // Unexpected exit case <-toConsulCh: diff --git a/subcommand/sync-catalog/command_ent_test.go b/subcommand/sync-catalog/command_ent_test.go new file mode 100644 index 0000000000..971a04f35e --- /dev/null +++ b/subcommand/sync-catalog/command_ent_test.go @@ -0,0 +1,733 @@ +// +build enterprise + +package synccatalog + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/client-go/kubernetes/fake" +) + +// Test syncing to a single destination consul namespace. +func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ConsulDestNamespace string + ExpectedNamespaces []string + }{ + { + "default single namespace", + "default", + []string{"default"}, + }, + { + "non-default single namespace", + "destination", + []string{"default", "destination"}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(tt *testing.T) { + k8s, testAgent := completeSetupEnterprise(tt) + defer testAgent.Stop() + + // Run the command. + ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + }) + require.NoError(tt, err) + + cmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name(), + Level: hclog.Debug, + }), + } + + // Create two services in k8s in default and foo namespaces. + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("default", "1.1.1.1")) + require.NoError(tt, err) + _, err = k8s.CoreV1().Namespaces().Create(&apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }) + require.NoError(tt, err) + _, err = k8s.CoreV1().Services("foo").Create(lbService("foo", "1.1.1.1")) + require.NoError(tt, err) + + exitChan := runCommandAsynchronously(&cmd, []string{ + "-consul-write-interval", "500ms", + "-add-k8s-namespace-suffix", + "-log-level=debug", + "-enable-namespaces", + "-consul-destination-namespace", c.ConsulDestNamespace, + "-allow-k8s-namespace=*", + "-add-k8s-namespace-suffix=false", + }) + defer stopCommand(tt, &cmd, exitChan) + + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + // Both services should have been created in the destination namespace. 
+ for _, svcName := range []string{"default", "foo"} { + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", &api.QueryOptions{ + Namespace: c.ConsulDestNamespace, + }) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svcName) + } + + // Verify namespace creation details + // Check that we have the right number of namespaces + namespaces, _, err := consulClient.Namespaces().List(&api.QueryOptions{}) + require.NoError(r, err) + require.Len(r, namespaces, len(c.ExpectedNamespaces)) + + // Check the namespace details + for _, ns := range c.ExpectedNamespaces { + actNamespace, _, err := consulClient.Namespaces().Read(ns, &api.QueryOptions{}) + require.NoErrorf(r, err, "error getting namespace %s", ns) + require.NotNilf(r, actNamespace, "namespace %s was nil", ns) + require.Equalf(r, ns, actNamespace.Name, "namespace %s was improperly named", ns) + + // Check created namespace properties + if ns != "default" { + require.Equalf(r, "Auto-generated by a Catalog Sync Process", actNamespace.Description, + "wrong namespace description for namespace %s", ns) + require.Containsf(r, actNamespace.Meta, "external-source", + "namespace %s does not contain external-source metadata key", ns) + require.Equalf(r, "kubernetes", actNamespace.Meta["external-source"], + "namespace %s has wrong value for external-source metadata key", ns) + } + + } + }) + }) + } +} + +// Test syncing with mirroring and different prefixes. +func TestRun_ToConsulMirroringNamespaces(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + // MirroringPrefix is the value passed to -k8s-namespace-mirroring-prefix. + MirroringPrefix string + // ExtraFlags are extra flags for the command. + ExtraFlags []string + // ExpectNamespaceSuffix controls whether we expect the service names + // to have their namespaces as a suffix. 
+ ExpectNamespaceSuffix bool + // The namespaces that we expect to be created + ExpectedNamespaces []string + }{ + "no prefix, no suffix": { + MirroringPrefix: "", + ExtraFlags: []string{"-add-k8s-namespace-suffix=false"}, + ExpectNamespaceSuffix: false, + ExpectedNamespaces: []string{"default", "foo"}, + }, + "no prefix, with suffix": { + MirroringPrefix: "", + ExtraFlags: []string{"-add-k8s-namespace-suffix=true"}, + ExpectNamespaceSuffix: true, + ExpectedNamespaces: []string{"default", "foo"}, + }, + "with prefix, no suffix": { + MirroringPrefix: "prefix-", + ExtraFlags: []string{"-add-k8s-namespace-suffix=false"}, + ExpectNamespaceSuffix: false, + ExpectedNamespaces: []string{"default", "prefix-default", "prefix-foo"}, + }, + "with prefix, with suffix": { + MirroringPrefix: "prefix-", + ExtraFlags: []string{"-add-k8s-namespace-suffix=true"}, + ExpectNamespaceSuffix: true, + ExpectedNamespaces: []string{"default", "prefix-default", "prefix-foo"}, + }, + "no prefix, no suffix, with destination namespace flag": { + MirroringPrefix: "", + // Mirroring takes precedence over the -consul-destination-namespace + // flag so it should have no effect. + ExtraFlags: []string{"-add-k8s-namespace-suffix=false", "-consul-destination-namespace=dest"}, + ExpectNamespaceSuffix: false, + ExpectedNamespaces: []string{"default", "foo"}, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + k8s, testAgent := completeSetupEnterprise(tt) + defer testAgent.Stop() + + // Run the command. + ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + }) + require.NoError(tt, err) + + cmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name(), + Level: hclog.Debug, + }), + } + + // Create two services in k8s in default and foo namespaces. 
+ _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("default", "1.1.1.1")) + require.NoError(tt, err) + _, err = k8s.CoreV1().Namespaces().Create(&apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }) + require.NoError(tt, err) + _, err = k8s.CoreV1().Services("foo").Create(lbService("foo", "1.1.1.1")) + require.NoError(tt, err) + + args := append([]string{ + "-consul-write-interval", "500ms", + "-add-k8s-namespace-suffix", + "-log-level=debug", + "-enable-namespaces", + "-allow-k8s-namespace=*", + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix", c.MirroringPrefix, + }, c.ExtraFlags...) + exitChan := runCommandAsynchronously(&cmd, args) + defer stopCommand(tt, &cmd, exitChan) + + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + // Each service should have been created in a mirrored namespace. + for _, svcName := range []string{"default", "foo"} { + // NOTE: svcName is the same as the kubernetes namespace. + expNamespace := c.MirroringPrefix + svcName + if c.ExpectNamespaceSuffix { + // Since the service name is the same as the namespace, + // in the case of the namespace suffix we expect + // the service name to be suffixed. 
+ svcName = fmt.Sprintf("%s-%s", svcName, svcName) + } + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", &api.QueryOptions{ + Namespace: expNamespace, + }) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svcName) + } + }) + + timer = &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + // Verify namespace creation details + // Check that we have the right number of namespaces + namespaces, _, err := consulClient.Namespaces().List(&api.QueryOptions{}) + require.NoError(r, err) + require.Len(r, namespaces, len(c.ExpectedNamespaces)) + + // Check the namespace details + for _, ns := range c.ExpectedNamespaces { + actNamespace, _, err := consulClient.Namespaces().Read(ns, &api.QueryOptions{}) + require.NoErrorf(r, err, "error getting namespace %s", ns) + require.NotNilf(r, actNamespace, "namespace %s was nil", ns) + require.Equalf(r, ns, actNamespace.Name, "namespace %s was improperly named", ns) + + // Check created namespace properties + if ns != "default" { + require.Equalf(r, "Auto-generated by a Catalog Sync Process", actNamespace.Description, + "wrong namespace description for namespace %s", ns) + require.Containsf(r, actNamespace.Meta, "external-source", + "namespace %s does not contain external-source metadata key", ns) + require.Equalf(r, "kubernetes", actNamespace.Meta["external-source"], + "namespace %s has wrong value for external-source metadata key", ns) + } + + } + + }) + }) + } +} + +// Test that when flags are changed and the command re-run, old services +// are deleted and new services are created where expected. +func TestRun_ToConsulChangingNamespaceFlags(t *testing.T) { + t.Parallel() + + // There are many different settings: + // 1. Namespaces enabled with a single destination namespace (single-dest-ns) + // 2. Namespaces enabled with mirroring namespaces (mirroring-ns) + // 3. 
Namespaces enabled with mirroring namespaces and prefixes (mirroring-ns-prefix) + // + // NOTE: In all cases, two services will be created in Kubernetes: + // 1. namespace: default, name: default + // 2. namespace: foo, name: foo + + cases := map[string]struct { + // FirstRunFlags are the command flags for the first run of the command. + FirstRunFlags []string + // FirstRunExpServices are the services we expect to be created on the + // first run. They're specified as "name/namespace". + FirstRunExpServices []string + // SecondRunFlags are the command flags for the second run of the command. + SecondRunFlags []string + // SecondRunExpServices are the services we expect to be created on the + // second run. They're specified as "name/namespace". + SecondRunExpServices []string + // SecondRunExpDeletedServices are the services we expect to be deleted + // on the second run. They're specified as "name/namespace". + SecondRunExpDeletedServices []string + }{ + "namespaces-disabled => single-dest-ns=default": { + FirstRunFlags: nil, + FirstRunExpServices: []string{"foo/default", "default/default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=default", + }, + SecondRunExpServices: []string{"foo/default", "default/default"}, + SecondRunExpDeletedServices: nil, + }, + "namespaces-disabled => single-dest-ns=dest": { + FirstRunFlags: nil, + FirstRunExpServices: []string{"foo/default", "default/default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=dest", + }, + SecondRunExpServices: []string{"foo/dest", "default/dest"}, + SecondRunExpDeletedServices: []string{"foo/default", "default/default"}, + }, + "namespaces-disabled => mirroring-ns": { + FirstRunFlags: nil, + FirstRunExpServices: []string{"foo/default", "default/default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + }, + SecondRunExpServices: []string{"foo/foo", "default/default"}, + 
SecondRunExpDeletedServices: []string{"foo/default"}, + }, + "namespaces-disabled => mirroring-ns-prefix": { + FirstRunFlags: nil, + FirstRunExpServices: []string{"foo/default", "default/default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunExpServices: []string{"foo/prefix-foo", "default/prefix-default"}, + SecondRunExpDeletedServices: []string{"foo/default", "default/default"}, + }, + "single-dest-ns=first => single-dest-ns=second": { + FirstRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=first", + }, + FirstRunExpServices: []string{"foo/first", "default/first"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=second", + }, + SecondRunExpServices: []string{"foo/second", "default/second"}, + SecondRunExpDeletedServices: []string{"foo/first", "default/first"}, + }, + "single-dest-ns => mirroring-ns": { + FirstRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=first", + }, + FirstRunExpServices: []string{"foo/first", "default/first"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + }, + SecondRunExpServices: []string{"foo/foo", "default/default"}, + SecondRunExpDeletedServices: []string{"foo/first", "default/first"}, + }, + "single-dest-ns => mirroring-ns-prefix": { + FirstRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=first", + }, + FirstRunExpServices: []string{"foo/first", "default/first"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunExpServices: []string{"foo/prefix-foo", "default/prefix-default"}, + SecondRunExpDeletedServices: []string{"foo/first", "default/first"}, + }, + "mirroring-ns => single-dest-ns": { + FirstRunFlags: []string{ + "-enable-namespaces", + 
"-enable-k8s-namespace-mirroring", + }, + FirstRunExpServices: []string{"foo/foo", "default/default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=second", + }, + SecondRunExpServices: []string{"foo/second", "default/second"}, + SecondRunExpDeletedServices: []string{"foo/foo", "default/default"}, + }, + "mirroring-ns => mirroring-ns-prefix": { + FirstRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + }, + FirstRunExpServices: []string{"foo/foo", "default/default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunExpServices: []string{"foo/prefix-foo", "default/prefix-default"}, + SecondRunExpDeletedServices: []string{"foo/foo", "default/default"}, + }, + "mirroring-ns-prefix => single-dest-ns": { + FirstRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix=prefix-", + }, + FirstRunExpServices: []string{"foo/prefix-foo", "default/prefix-default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-consul-destination-namespace=second", + }, + SecondRunExpServices: []string{"foo/second", "default/second"}, + SecondRunExpDeletedServices: []string{"foo/prefix-foo", "default/prefix-default"}, + }, + "mirroring-ns-prefix => mirroring-ns": { + FirstRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix=prefix-", + }, + FirstRunExpServices: []string{"foo/prefix-foo", "default/prefix-default"}, + SecondRunFlags: []string{ + "-enable-namespaces", + "-enable-k8s-namespace-mirroring", + }, + SecondRunExpServices: []string{"foo/foo", "default/default"}, + SecondRunExpDeletedServices: []string{"foo/prefix-foo", "default/prefix-default"}, + }, + } + + nameAndNS := func(s string) (string, string) { + split := strings.Split(s, "/") + return split[0], split[1] + } + + for name, c := 
range cases { + t.Run(name, func(tt *testing.T) { + k8s, testAgent := completeSetupEnterprise(tt) + defer testAgent.Stop() + ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + }) + require.NoError(tt, err) + + commonArgs := []string{ + "-consul-write-interval", "500ms", + "-log-level=debug", + "-allow-k8s-namespace=*", + } + + // Create two services in k8s in default and foo namespaces. + { + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("default", "1.1.1.1")) + require.NoError(tt, err) + _, err = k8s.CoreV1().Namespaces().Create(&apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }) + require.NoError(tt, err) + _, err = k8s.CoreV1().Services("foo").Create(lbService("foo", "1.1.1.1")) + require.NoError(tt, err) + } + + // Run the first command. + { + firstCmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name() + "-firstrun", + Level: hclog.Debug, + }), + } + exitChan := runCommandAsynchronously(&firstCmd, append(commonArgs, c.FirstRunFlags...)) + + // Wait until the expected services are synced. + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + for _, svcNamespace := range c.FirstRunExpServices { + svcName, ns := nameAndNS(svcNamespace) + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", &api.QueryOptions{ + Namespace: ns, + }) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svcName) + } + }) + stopCommand(tt, &firstCmd, exitChan) + } + tt.Log("first command run complete") + + // Run the second command. 
+ { + secondCmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name() + "-secondrun", + Level: hclog.Debug, + }), + } + exitChan := runCommandAsynchronously(&secondCmd, append(commonArgs, c.SecondRunFlags...)) + defer stopCommand(tt, &secondCmd, exitChan) + + // Wait until the expected services are synced and the old ones + // deleted. + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + for _, svcNamespace := range c.SecondRunExpServices { + svcName, ns := nameAndNS(svcNamespace) + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", &api.QueryOptions{ + Namespace: ns, + }) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svcName) + } + }) + tt.Log("existing services verified") + + timer = &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + for _, svcNamespace := range c.SecondRunExpDeletedServices { + svcName, ns := nameAndNS(svcNamespace) + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", &api.QueryOptions{ + Namespace: ns, + }) + require.NoError(r, err) + require.Len(r, instances, 0) + } + }) + tt.Log("deleted services verified") + } + }) + } +} + +// Tests that the cross-namespace ACL policy is correctly +// attached to all created namespaces. Specific teste for +// services and their destinations are covered in other tests. 
+func TestRun_ToConsulNamespacesACLs(t *testing.T) { + cases := []struct { + Name string + Flags []string + ExpectedNamespaces []string + }{ + { + "acls + single destination namespace 'default'", + []string{"-consul-destination-namespace=default"}, + []string{"default"}, + }, + { + "acls + non-default single namespace", + []string{"-consul-destination-namespace=destination"}, + []string{"default", "destination"}, + }, + { + "acls + mirroring", + []string{ + "-consul-destination-namespace=default", // overridden by mirroring + "-enable-k8s-namespace-mirroring", + }, + []string{"default", "non-default"}, + }, + { + "acls + mirroring with prefix", + []string{ + "-consul-destination-namespace=default", // overridden by mirroring + "-enable-k8s-namespace-mirroring", + "-k8s-namespace-mirroring-prefix=prefix-", + }, + []string{"default", "prefix-default", "prefix-non-default"}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(tt *testing.T) { + // Set up k8s client + k8s := fake.NewSimpleClientset() + + // Create two k8s services in two different namespaces + _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("default", "1.1.1.1")) + require.NoError(tt, err) + _, err = k8s.CoreV1().Namespaces().Create(&apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-default", + }, + }) + require.NoError(tt, err) + _, err = k8s.CoreV1().Services("non-default").Create(lbService("non-default", "1.1.1.1")) + require.NoError(tt, err) + + // Set up consul server + a, err := testutil.NewTestServerConfigT(tt, func(client *testutil.TestServerConfig) { + client.ACL.Enabled = true + }) + require.NoError(tt, err) + defer a.Stop() + + // Set up a client for bootstrapping + bootClient, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(tt, err) + + // Bootstrap the server and get the bootstrap token + var bootstrapResp *api.ACLToken + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + 
retry.RunWith(timer, tt, func(r *retry.R) { + bootstrapResp, _, err = bootClient.ACL().Bootstrap() + require.NoError(r, err) + }) + bootstrapToken := bootstrapResp.SecretID + require.NotEmpty(tt, bootstrapToken) + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + Token: bootstrapToken, + }) + require.NoError(tt, err) + + // Create cross namespace policy + // This would have been created by the acl bootstrapper in the + // default namespace to be attached to all created namespaces. + crossNamespaceRules := `namespace_prefix "" { + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } +} ` + + policyTmpl := api.ACLPolicy{ + Name: "cross-namespace-policy", + Description: "Policy to allow permissions to cross Consul namespaces for k8s services", + Rules: crossNamespaceRules, + } + + _, _, err = client.ACL().PolicyCreate(&policyTmpl, &api.WriteOptions{}) + require.NoError(tt, err) + + // Set up the sync command + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + consulClient: client, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name(), + Level: hclog.Debug, + }), + } + + // Set flags and run the command + commonArgs := []string{ + "-consul-write-interval", "500ms", + "-log-level=debug", + "-allow-k8s-namespace=*", + "-enable-namespaces", + "-consul-cross-namespace-acl-policy=cross-namespace-policy", + } + exitChan := runCommandAsynchronously(&cmd, append(commonArgs, c.Flags...)) + defer stopCommand(tt, &cmd, exitChan) + + // Check the namespaces are created correctly + timer = &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + // Check that we have the right number of namespaces + namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) + require.NoError(r, err) + require.Len(r, namespaces, len(c.ExpectedNamespaces)) + + // Check the namespace details + for _, ns := range c.ExpectedNamespaces 
{ + actNamespace, _, err := client.Namespaces().Read(ns, &api.QueryOptions{}) + require.NoErrorf(r, err, "error getting namespace %s", ns) + require.NotNilf(r, actNamespace, "namespace %s was nil", ns) + require.Equalf(r, ns, actNamespace.Name, "namespace %s was improperly named", ns) + + // Check created namespace properties + if ns != "default" { + require.Equalf(r, "Auto-generated by a Catalog Sync Process", actNamespace.Description, + "wrong namespace description for namespace %s", ns) + require.Containsf(r, actNamespace.Meta, "external-source", + "namespace %s does not contain external-source metadata key", ns) + require.Equalf(r, "kubernetes", actNamespace.Meta["external-source"], + "namespace %s has wrong value for external-source metadata key", ns) + + // Check for ACL policy things + // The acl bootstrapper will update the `default` namespace, so that + // can't be tested here. + require.NotNilf(r, actNamespace.ACLs, "ACLs was nil for namespace %s", ns) + require.Lenf(r, actNamespace.ACLs.PolicyDefaults, 1, "wrong length for PolicyDefaults in namespace %s", ns) + require.Equalf(r, "cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name, + "wrong policy name for namespace %s", ns) + } + + } + + }) + }) + } +} + +// Set up test consul agent and fake kubernetes cluster client +// todo: use this setup method everywhere. The old one (completeSetup) uses +// the test agent instead of the testserver. 
+func completeSetupEnterprise(t *testing.T) (*fake.Clientset, *testutil.TestServer) { + k8s := fake.NewSimpleClientset() + svr, err := testutil.NewTestServerT(t) + require.NoError(t, err) + return k8s, svr +} diff --git a/subcommand/sync-catalog/command_test.go b/subcommand/sync-catalog/command_test.go index 5a299dce16..0849626f94 100644 --- a/subcommand/sync-catalog/command_test.go +++ b/subcommand/sync-catalog/command_test.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" apiv1 "k8s.io/api/core/v1" @@ -25,6 +26,10 @@ func TestRun_Defaults_SyncsConsulServiceToK8s(t *testing.T) { cmd := Command{ UI: ui, clientset: k8s, + logger: hclog.New(&hclog.LoggerOptions{ + Name: t.Name(), + Level: hclog.Debug, + }), } exitChan := runCommandAsynchronously(&cmd, []string{ @@ -55,6 +60,11 @@ func TestRun_ToConsulWithAddK8SNamespaceSuffix(t *testing.T) { UI: ui, clientset: k8s, consulClient: testAgent.Client(), + logger: hclog.New(&hclog.LoggerOptions{ + Name: t.Name(), + Level: hclog.Debug, + }), + flagAllowK8sNamespacesList: []string{"*"}, } // create a service in k8s @@ -91,6 +101,11 @@ func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { UI: ui, clientset: k8s, consulClient: testAgent.Client(), + logger: hclog.New(&hclog.LoggerOptions{ + Name: t.Name(), + Level: hclog.Debug, + }), + flagAllowK8sNamespacesList: []string{"*"}, } // create a service in k8s @@ -143,6 +158,11 @@ func TestCommand_Run_ToConsulTwoServicesSameNameDifferentNamespace(t *testing.T) UI: ui, clientset: k8s, consulClient: testAgent.Client(), + logger: hclog.New(&hclog.LoggerOptions{ + Name: t.Name(), + Level: hclog.Debug, + }), + flagAllowK8sNamespacesList: []string{"*"}, } // create two services in k8s @@ -172,6 +192,284 @@ func TestCommand_Run_ToConsulTwoServicesSameNameDifferentNamespace(t *testing.T) }) } +// Test the 
allow/deny list combinations. +func TestRun_ToConsulAllowDenyLists(t *testing.T) { + t.Parallel() + + // NOTE: In all cases, two services will be created in Kubernetes: + // 1. namespace: default, name: default + // 2. namespace: foo, name: foo + + cases := map[string]struct { + AllowList []string + DenyList []string + ExpServices []string + }{ + "empty lists": { + AllowList: nil, + DenyList: nil, + ExpServices: nil, + }, + "only from allow list": { + AllowList: []string{"foo"}, + DenyList: nil, + ExpServices: []string{"foo"}, + }, + "both in allow and deny": { + AllowList: []string{"foo"}, + DenyList: []string{"foo"}, + ExpServices: nil, + }, + "deny removes one from allow": { + AllowList: []string{"foo", "default"}, + DenyList: []string{"foo"}, + ExpServices: []string{"default"}, + }, + "* in allow": { + AllowList: []string{"*"}, + DenyList: nil, + ExpServices: []string{"foo", "default"}, + }, + "* in allow with one denied": { + AllowList: []string{"*"}, + DenyList: []string{"foo"}, + ExpServices: []string{"default"}, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + k8s, testAgent := completeSetup(tt) + defer testAgent.Shutdown() + ui := cli.NewMockUi() + consulClient := testAgent.Client() + + // Create two services in k8s in default and foo namespaces. 
+ { + _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("default", "1.1.1.1")) + require.NoError(tt, err) + _, err = k8s.CoreV1().Namespaces().Create(&apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }) + require.NoError(tt, err) + _, err = k8s.CoreV1().Services("foo").Create(lbService("foo", "1.1.1.1")) + require.NoError(tt, err) + } + + flags := []string{ + "-consul-write-interval", "500ms", + "-log-level=debug", + } + for _, allow := range c.AllowList { + flags = append(flags, "-allow-k8s-namespace", allow) + } + for _, deny := range c.DenyList { + flags = append(flags, "-deny-k8s-namespace", deny) + } + + cmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name(), + Level: hclog.Debug, + }), + } + exitChan := runCommandAsynchronously(&cmd, flags) + defer stopCommand(tt, &cmd, exitChan) + + timer := &retry.Timer{Timeout: 2 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + svcs, _, err := consulClient.Catalog().Services(nil) + require.NoError(r, err) + // There should be the number of expected services plus one + // for the default Consul service. + require.Len(r, svcs, len(c.ExpServices)+1) + for _, svc := range c.ExpServices { + instances, _, err := consulClient.Catalog().Service(svc, "k8s", nil) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svc) + } + }) + }) + } +} + +// Test that when flags are changed and the command re-run, old services +// are deleted and new services are created where expected. +func TestRun_ToConsulChangingFlags(t *testing.T) { + t.Parallel() + + // NOTE: In all cases, two services will be created in Kubernetes: + // 1. namespace: default, name: default + // 2. namespace: foo, name: foo + // + // NOTE: We're not testing all permutations the allow/deny lists. That is + // tested in TestRun_ToConsulAllowDenyLists. 
We assume that that test + // ensures the allow/deny lists are working and so all we need to test here + // is that if the resulting set of namespaces changes, we add/remove services + // accordingly. + + cases := map[string]struct { + // FirstRunFlags are the command flags for the first run of the command. + FirstRunFlags []string + // FirstRunExpServices are the services we expect to be created on the + // first run. + FirstRunExpServices []string + // SecondRunFlags are the command flags for the second run of the command. + SecondRunFlags []string + // SecondRunExpServices are the services we expect to be created on the + // second run. + SecondRunExpServices []string + // SecondRunExpDeletedServices are the services we expect to be deleted + // on the second run. + SecondRunExpDeletedServices []string + }{ + "service-suffix-false => service-suffix-true": { + FirstRunFlags: []string{ + "-allow-k8s-namespace=*", + "-add-k8s-namespace-suffix=false", + }, + FirstRunExpServices: []string{"foo", "default"}, + SecondRunFlags: []string{ + "-allow-k8s-namespace=*", + "-add-k8s-namespace-suffix=true", + }, + SecondRunExpServices: []string{"foo-foo", "default-default"}, + SecondRunExpDeletedServices: []string{"foo", "default"}, + }, + "service-suffix-true => service-suffix-false": { + FirstRunFlags: []string{ + "-allow-k8s-namespace=*", + "-add-k8s-namespace-suffix=true", + }, + FirstRunExpServices: []string{"foo-foo", "default-default"}, + SecondRunFlags: []string{ + "-allow-k8s-namespace=*", + "-add-k8s-namespace-suffix=false", + }, + SecondRunExpServices: []string{"foo", "default"}, + SecondRunExpDeletedServices: []string{"foo-default", "default-default"}, + }, + "allow-k8s-namespace=* => allow-k8s-namespace=default": { + FirstRunFlags: []string{ + "-allow-k8s-namespace=*", + }, + FirstRunExpServices: []string{"foo", "default"}, + SecondRunFlags: []string{ + "-allow-k8s-namespace=default", + }, + SecondRunExpServices: []string{"default"}, + SecondRunExpDeletedServices: 
[]string{"foo"}, + }, + "allow-k8s-namespace=default => allow-k8s-namespace=*": { + FirstRunFlags: []string{ + "-allow-k8s-namespace=default", + }, + FirstRunExpServices: []string{"default"}, + SecondRunFlags: []string{ + "-allow-k8s-namespace=*", + }, + SecondRunExpServices: []string{"foo", "default"}, + SecondRunExpDeletedServices: nil, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + k8s, testAgent := completeSetup(tt) + defer testAgent.Shutdown() + ui := cli.NewMockUi() + consulClient := testAgent.Client() + + commonArgs := []string{ + "-consul-write-interval", "500ms", + "-log-level=debug", + } + + // Create two services in k8s in default and foo namespaces. + { + _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(lbService("default", "1.1.1.1")) + require.NoError(tt, err) + _, err = k8s.CoreV1().Namespaces().Create(&apiv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }) + require.NoError(tt, err) + _, err = k8s.CoreV1().Services("foo").Create(lbService("foo", "1.1.1.1")) + require.NoError(tt, err) + } + + // Run the first command. + { + firstCmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name() + "-firstrun", + Level: hclog.Debug, + }), + } + exitChan := runCommandAsynchronously(&firstCmd, append(commonArgs, c.FirstRunFlags...)) + + // Wait until the expected services are synced. + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + for _, svcName := range c.FirstRunExpServices { + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", nil) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svcName) + } + }) + stopCommand(tt, &firstCmd, exitChan) + } + tt.Log("first command run complete") + + // Run the second command. 
+ { + secondCmd := Command{ + UI: ui, + clientset: k8s, + consulClient: consulClient, + logger: hclog.New(&hclog.LoggerOptions{ + Name: tt.Name() + "-secondrun", + Level: hclog.Debug, + }), + } + exitChan := runCommandAsynchronously(&secondCmd, append(commonArgs, c.SecondRunFlags...)) + defer stopCommand(tt, &secondCmd, exitChan) + + // Wait until the expected services are synced and the old ones + // deleted. + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + for _, svcName := range c.SecondRunExpServices { + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", nil) + require.NoError(r, err) + require.Len(r, instances, 1) + require.Equal(r, instances[0].ServiceName, svcName) + } + tt.Log("existing services verified") + + for _, svcName := range c.SecondRunExpDeletedServices { + instances, _, err := consulClient.Catalog().Service(svcName, "k8s", nil) + require.NoError(r, err) + require.Len(r, instances, 0) + } + tt.Log("deleted services verified") + }) + } + }) + } +} + // Set up test consul agent and fake kubernetes cluster client func completeSetup(t *testing.T) (*fake.Clientset, *agent.TestAgent) { k8s := fake.NewSimpleClientset() @@ -185,6 +483,11 @@ func completeSetup(t *testing.T) (*fake.Clientset, *agent.TestAgent) { // Note that it's the responsibility of the caller to terminate the command by calling stopCommand, // otherwise it can run forever. func runCommandAsynchronously(cmd *Command, args []string) chan int { + // We have to run cmd.init() to ensure that the channel the command is + // using to watch for os interrupts is initialized. If we don't do this, + // then if stopCommand is called immediately, it will block forever + // because it calls interrupt() which will attempt to send on a nil channel. + cmd.init() exitChan := make(chan int, 1) go func() {