diff --git a/go.mod b/go.mod index de28f20072..ddc41a0d53 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( k8s.io/client-go v0.21.0 k8s.io/klog/v2 v2.8.0 k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 - sigs.k8s.io/controller-runtime v0.9.0-alpha.1.0.20210413130450-7ef2da0bc161 + sigs.k8s.io/controller-runtime v0.9.0-beta.1.0.20210512131817-ce2f0c92d77e sigs.k8s.io/controller-tools v0.3.0 sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index efbe860faa..344d7f74d8 100644 --- a/go.sum +++ b/go.sum @@ -98,7 +98,6 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.35.20 h1:Hs7x9Czh+MMPnZLQqHhsuZKeNFA3Vuf7pdy2r5QlVb0= github.com/aws/aws-sdk-go v1.35.20/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.38.23 h1:lSLWSu2itm9eH45iwiFCdcjFyU7Ec0oS0CNHr+/mVek= github.com/aws/aws-sdk-go v1.38.23/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -1257,7 +1256,6 @@ k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8 k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.21.0-rc.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/code-generator v0.21.0 h1:LGWJOvkbBNpuRBqBRXUjzfvymUh7F/iR2KDpwLnqCM4= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= @@ -1275,7 +1273,6 @@ k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -1326,8 +1323,9 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQb sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= -sigs.k8s.io/controller-runtime v0.9.0-alpha.1.0.20210413130450-7ef2da0bc161 h1:ac0kOprg3oBN+X+Xn4W0XqHWC15zw7sWlFc3zM7PQj8= sigs.k8s.io/controller-runtime v0.9.0-alpha.1.0.20210413130450-7ef2da0bc161/go.mod h1:ufPDuvefw2Y1KnBgHQrLdOjueYlj+XJV2AszbT+WTxs= +sigs.k8s.io/controller-runtime v0.9.0-beta.1.0.20210512131817-ce2f0c92d77e h1:sBd50AyUA/YEhWNtkIB16ep1scbkeOa2Om52t1wk/vo= 
+sigs.k8s.io/controller-runtime v0.9.0-beta.1.0.20210512131817-ce2f0c92d77e/go.mod h1:ufPDuvefw2Y1KnBgHQrLdOjueYlj+XJV2AszbT+WTxs= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/controller-tools v0.3.0 h1:y3YD99XOyWaXkiF1kd41uRvfp/64teWcrEZFuHxPhJ4= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= diff --git a/vendor/modules.txt b/vendor/modules.txt index c8c264c56e..c6d5257112 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -668,7 +668,7 @@ k8s.io/utils/pointer k8s.io/utils/trace # sigs.k8s.io/cluster-api-provider-azure v0.0.0-00010101000000-000000000000 => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201209184807-075372e2ed03 sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1 -# sigs.k8s.io/controller-runtime v0.9.0-alpha.1.0.20210413130450-7ef2da0bc161 +# sigs.k8s.io/controller-runtime v0.9.0-beta.1.0.20210512131817-ce2f0c92d77e ## explicit sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile index 139c6b177b..8541f2a2fc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/Makefile +++ b/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -54,9 +54,13 @@ help: ## Display this help ## -------------------------------------- .PHONY: test -test: ## Run the script check-everything.sh which will check all. +test: test-tools ## Run the script check-everything.sh which will check all. TRACE=1 ./hack/check-everything.sh +.PHONY: test-tools +test-tools: ## tests the tools codebase (setup-envtest) + cd tools/setup-envtest && go test ./... + ## -------------------------------------- ## Binaries ## -------------------------------------- @@ -74,10 +78,17 @@ $(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. ## Linting ## -------------------------------------- -.PHONY: lint -lint: $(GOLANGCI_LINT) ## Lint codebase. +.PHONY: lint-libs +lint-libs: $(GOLANGCI_LINT) ## Lint library codebase. $(GOLANGCI_LINT) run -v +.PHONY: lint-tools +lint-tools: $(GOLANGCI_LINT) ## Lint tools codebase. 
+ cd tools/setup-envtest && $(GOLANGCI_LINT) run -v + +.PHONY: lint +lint: lint-libs lint-tools + ## -------------------------------------- ## Generate ## -------------------------------------- diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 71dfbd0454..dee523fe23 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -86,6 +86,9 @@ type Informer interface { HasSynced() bool } +// SelectorsByObject associate a client.Object's GVK to a field/label selector +type SelectorsByObject map[client.Object]internal.Selector + // Options are the optional arguments for creating a new InformersMap object type Options struct { // Scheme is the scheme to use for mapping objects to GroupVersionKinds @@ -103,6 +106,13 @@ type Options struct { // Namespace restricts the cache's ListWatch to the desired namespace // Default watches all namespaces Namespace string + + // SelectorsByObject restricts the cache's ListWatch to the desired + // fields per GVK at the specified object, the map's value must implement + // Selector [1] using for example a Set [2] + // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector + // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set + SelectorsByObject SelectorsByObject } var defaultResyncTime = 10 * time.Hour @@ -113,10 +123,38 @@ func New(config *rest.Config, opts Options) (Cache, error) { if err != nil { return nil, err } - im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace) + selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.Scheme) + if err != nil { + return nil, err + } + im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK) return &informerCache{InformersMap: im}, nil } +// BuilderWithOptions returns a Cache constructor that will build the a cache +// honoring the options argument, this is useful to specify options like +// SelectorsByObject +// WARNING: if SelectorsByObject is specified. filtered out resources are not +// returned. 
+func BuilderWithOptions(options Options) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + if opts.Scheme == nil { + opts.Scheme = options.Scheme + } + if opts.Mapper == nil { + opts.Mapper = options.Mapper + } + if opts.Resync == nil { + opts.Resync = options.Resync + } + if opts.Namespace == "" { + opts.Namespace = options.Namespace + } + opts.SelectorsByObject = options.SelectorsByObject + return New(config, opts) + } +} + func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Use the default Kubernetes Scheme if unset if opts.Scheme == nil { @@ -139,3 +177,15 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { } return opts, nil } + +func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) { + selectorsByGVK := internal.SelectorsByGVK{} + for object, selector := range selectorsByObject { + gvk, err := apiutil.GVKForObject(object, scheme) + if err != nil { + return nil, err + } + selectorsByGVK[gvk] = selector + } + return selectorsByGVK, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index e895631e2e..bd546b934a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -125,8 +125,15 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli labelSel = listOpts.LabelSelector } + limitSet := listOpts.Limit > 0 + runtimeObjs := make([]runtime.Object, 0, len(objs)) - for _, item := range objs { + for i, item := range objs { + // if the Limit option is set and the number of items + // listed exceeds this limit, then stop reading. + if limitSet && int64(i) >= listOpts.Limit { + break + } obj, isObj := item.(runtime.Object) if !isObj { return fmt.Errorf("cache contained %T, which is not an Object", obj) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go index 02bb1919f7..2242d9b674 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -49,12 +49,14 @@ func NewInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string) *InformersMap { + namespace string, + selectors SelectorsByGVK, +) *InformersMap { return &InformersMap{ - structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace), - unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace), - metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace), + structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors), + unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors), Scheme: scheme, } @@ -105,16 +107,19 @@ func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj } // newStructuredInformersMap creates a new InformersMap for structured objects. 
-func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createStructuredListWatch) +func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, createStructuredListWatch) } // newUnstructuredInformersMap creates a new InformersMap for unstructured objects. -func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch) +func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, createUnstructuredListWatch) } // newMetadataInformersMap creates a new InformersMap for metadata-only objects. -func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createMetadataListWatch) +func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, createMetadataListWatch) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index 6b57c6fa61..5c9bd0b0a0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -34,7 +34,6 @@ import ( "k8s.io/client-go/metadata" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -48,6 +47,7 @@ func newSpecificInformersMap(config *rest.Config, mapper meta.RESTMapper, resync time.Duration, namespace string, + selectors SelectorsByGVK, createListWatcher createListWatcherFunc) *specificInformersMap { ip := &specificInformersMap{ config: config, @@ -60,6 +60,7 @@ func newSpecificInformersMap(config *rest.Config, startWait: make(chan struct{}), createListWatcher: createListWatcher, namespace: namespace, + selectors: selectors, } return ip } @@ -120,6 +121,10 @@ type specificInformersMap struct { // namespace is the namespace that all ListWatches are restricted to // default or empty string means all namespaces namespace string + + // selectors are the label or field selectors that will be added to the + // ListWatch ListOptions. + selectors SelectorsByGVK } // Start calls Run on each of the informers and sets started to true. Blocks on the context. 
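// A minimal, illustrative sketch of how a consumer might use the new
// SelectorsByObject cache option via BuilderWithOptions: the manager's cache
// then only ListWatches objects matching the given label/field selectors.
// The label value and node name below are hypothetical placeholders.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func newFilteredManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Swap in a cache constructor that honors the selectors; objects that
		// are filtered out never reach this manager's cache or its watchers.
		NewCache: cache.BuilderWithOptions(cache.Options{
			SelectorsByObject: cache.SelectorsByObject{
				&corev1.Pod{}:  {Label: labels.SelectorFromSet(labels.Set{"app": "example"})},
				&corev1.Node{}: {Field: fields.OneTermEqualSelector("metadata.name", "node-1")},
			},
		}),
	})
}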
@@ -216,6 +221,13 @@ func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, ob if err != nil { return nil, false, err } + + switch obj.(type) { + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + ni = metadataSharedIndexInformerPreserveGVK(gvk, ni) + default: + } + i := &MapEntry{ Informer: ni, Reader: CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk, scopeName: rm.Scope.Name()}, @@ -256,6 +268,7 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer // Create a new ListWatch for the obj return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors[gvk].ApplyToList(&opts) res := listObj.DeepCopyObject() isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) @@ -263,6 +276,7 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer }, // Setup the watch function WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot @@ -278,7 +292,12 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform if err != nil { return nil, err } - dynamicClient, err := dynamic.NewForConfig(ip.config) + + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfig(cfg) if err != nil { return nil, err } @@ -289,6 +308,7 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform // Create a new ListWatch for the obj return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors[gvk].ApplyToList(&opts) if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) } @@ -296,6 +316,7 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform }, // Setup the watch function WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { @@ -314,8 +335,13 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM return nil, err } + // Always clear the negotiated serializer and use the one + // set from the metadata client. 
+ cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + // grab the metadata client - client, err := metadata.NewForConfig(ip.config) + client, err := metadata.NewForConfig(cfg) if err != nil { return nil, err } @@ -327,6 +353,7 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM // create the relevant listwatch return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors[gvk].ApplyToList(&opts) if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) } @@ -334,6 +361,7 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM }, // Setup the watch function WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/metadata_infomer_wrapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/metadata_infomer_wrapper.go new file mode 100644 index 0000000000..c0fa24a5c1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/metadata_infomer_wrapper.go @@ -0,0 +1,71 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +func metadataSharedIndexInformerPreserveGVK(gvk schema.GroupVersionKind, si cache.SharedIndexInformer) cache.SharedIndexInformer { + return &sharedInformerWrapper{ + gvk: gvk, + SharedIndexInformer: si, + } +} + +type sharedInformerWrapper struct { + gvk schema.GroupVersionKind + cache.SharedIndexInformer +} + +func (s *sharedInformerWrapper) AddEventHandler(handler cache.ResourceEventHandler) { + s.SharedIndexInformer.AddEventHandler(&handlerPreserveGVK{s.gvk, handler}) +} + +func (s *sharedInformerWrapper) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { + s.SharedIndexInformer.AddEventHandlerWithResyncPeriod(&handlerPreserveGVK{s.gvk, handler}, resyncPeriod) +} + +type handlerPreserveGVK struct { + gvk schema.GroupVersionKind + cache.ResourceEventHandler +} + +func (h *handlerPreserveGVK) resetGroupVersionKind(obj interface{}) { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(h.gvk) + } +} + +func (h *handlerPreserveGVK) OnAdd(obj interface{}) { + h.resetGroupVersionKind(obj) + h.ResourceEventHandler.OnAdd(obj) +} + +func (h *handlerPreserveGVK) OnUpdate(oldObj, newObj interface{}) { + h.resetGroupVersionKind(oldObj) + h.resetGroupVersionKind(newObj) + h.ResourceEventHandler.OnUpdate(oldObj, newObj) +} + +func (h *handlerPreserveGVK) OnDelete(obj interface{}) { + h.resetGroupVersionKind(obj) + h.ResourceEventHandler.OnDelete(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go new file mode 100644 index 0000000000..03eda629a2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -0,0 +1,27 @@ +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SelectorsByGVK associate a GroupVersionKind to a field/label selector +type SelectorsByGVK map[schema.GroupVersionKind]Selector + +// Selector specify the label/field selector to fill in ListOptions +type Selector struct { + Label labels.Selector + Field fields.Selector +} + +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed +func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { + if s.Label != nil { + listOpts.LabelSelector = s.Label.String() + } + if s.Field != nil { + listOpts.FieldSelector = s.Field.String() + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index f0e18c09b0..7e3f67d8d4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -29,14 +29,19 @@ import ( "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) // NewCacheFunc - Function for creating a new cache from the options and a rest config type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) +// a new global namespaced cache to handle cluster scoped resources +const globalCache = "_cluster-scope" + // MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. 
// This will scope the cache to a list of namespaces. Listing for all namespaces -// will list for all the namespaces that this knows about. Note that this is not intended +// will list for all the namespaces that this knows about. By default this will create +// a global cache for cluster scoped resource (having empty namespace). Note that this is not intended // to be used for excluding namespaces, this is better done via a Predicate. Also note that // you may face performance issues when using this with a high number of namespaces. func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { @@ -45,6 +50,8 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { if err != nil { return nil, err } + // create a cache for cluster scoped resources + namespaces = append(namespaces, globalCache) caches := map[string]Cache{} for _, ns := range namespaces { opts.Namespace = ns @@ -54,7 +61,7 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { } caches[ns] = c } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme}, nil + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper}, nil } } @@ -65,6 +72,7 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { type multiNamespaceCache struct { namespaceToCache map[string]Cache Scheme *runtime.Scheme + RESTMapper meta.RESTMapper } var _ Cache = &multiNamespaceCache{} @@ -127,6 +135,17 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, } func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look into the global cache to fetch the object + cache := c.namespaceToCache[globalCache] + return cache.Get(ctx, key, obj) + } + cache, ok := c.namespaceToCache[key.Namespace] if !ok { return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) @@ -138,6 +157,18 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) + + isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look at the global cache to get the objects with the specified GVK + cache := c.namespaceToCache[globalCache] + return cache.List(ctx, list, opts...) + } + if listOpts.Namespace != corev1.NamespaceAll { cache, ok := c.namespaceToCache[listOpts.Namespace] if !ok { @@ -155,10 +186,13 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, if err != nil { return err } + + limitSet := listOpts.Limit > 0 + var resourceVersion string for _, cache := range c.namespaceToCache { listObj := list.DeepCopyObject().(client.ObjectList) - err = cache.List(ctx, listObj, opts...) + err = cache.List(ctx, listObj, &listOpts) if err != nil { return err } @@ -173,6 +207,17 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, allItems = append(allItems, items...) // The last list call should have the most correct resource version. resourceVersion = accessor.GetResourceVersion() + if limitSet { + // decrement Limit by the number of items + // fetched from the current namespace. 
+ listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of + // items read has reached this set limit, + // then stop reading. + if listOpts.Limit == 0 { + break + } + } } listAccessor.SetResourceVersion(resourceVersion) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index b3464c655d..bb66a6dfdd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -118,15 +118,24 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - cfg := createRestConfig(gvk, isUnstructured, baseConfig) - if cfg.NegotiatedSerializer == nil { - cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} - } - return rest.RESTClientFor(cfg) + return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) +} + +// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory +// in order to avoid clearing the GVK from the decoded object. +// +// See https://github.com/kubernetes/kubernetes/issues/80609. +type serializerWithDecodedGVK struct { + serializer.WithoutConversionCodecFactory +} + +// DecoderToVersion returns an decoder that does not do conversion. +func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return serializer } //createRestConfig copies the base config and updates needed fields for a new rest config -func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config) *rest.Config { +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { gv := gvk.GroupVersion() cfg := rest.CopyConfig(baseConfig) @@ -147,5 +156,16 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf } protobufSchemeLock.RUnlock() } + + if cfg.NegotiatedSerializer == nil { + if isUnstructured { + // If the object is unstructured, we need to preserve the GVK information. + // Use our own custom serializer. + cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } else { + cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} + } + } + return cfg } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 82561a8f1a..3444ab52b4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -19,6 +19,7 @@ package client import ( "context" "fmt" + "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -159,7 +160,6 @@ type client struct { } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. 
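// An illustrative sketch of the multi-namespaced cache with the new global
// "_cluster-scope" cache: cluster-scoped objects such as Nodes can now be read
// through a cache that is otherwise restricted to a namespace list. The
// namespace and node names are hypothetical, and the Get assumes the manager's
// caches have already started (e.g. it runs inside a Reconciler).
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newMultiNamespaceManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Only these namespaces are watched; cluster-scoped resources are
		// served from the extra cluster-scope cache created by the builder.
		NewCache: cache.MultiNamespacedCacheBuilder([]string{"ns-one", "ns-two"}),
	})
}

func getNode(ctx context.Context, c client.Client, name string) (*corev1.Node, error) {
	// Before this change, a Get for a cluster-scoped object failed with an
	// "unknown namespace" error; it now routes to the global cache.
	node := &corev1.Node{}
	if err := c.Get(ctx, client.ObjectKey{Name: name}, node); err != nil {
		return nil, err
	}
	return node, nil
}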
-// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.16? func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { if gvk != schema.EmptyObjectKind.GroupVersionKind() { if v, ok := obj.(schema.ObjectKind); ok { @@ -246,6 +246,8 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { case *unstructured.Unstructured: return c.unstructuredClient.Get(ctx, key, obj) case *metav1.PartialObjectMetadata: + // Metadata only object should always preserve the GVK coming in from the caller. + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) return c.metadataClient.Get(ctx, key, obj) default: return c.typedClient.Get(ctx, key, obj) @@ -254,11 +256,33 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { // List implements client.Client func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - switch obj.(type) { + switch x := obj.(type) { case *unstructured.UnstructuredList: return c.unstructuredClient.List(ctx, obj, opts...) case *metav1.PartialObjectMetadataList: - return c.metadataClient.List(ctx, obj, opts...) + // Metadata only object should always preserve the GVK. + gvk := obj.GetObjectKind().GroupVersionKind() + defer c.resetGroupVersionKind(obj, gvk) + + // Call the list client. + if err := c.metadataClient.List(ctx, obj, opts...); err != nil { + return err + } + + // Restore the GVK for each item in the list. + itemGVK := schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + Kind: strings.TrimSuffix(gvk.Kind, "List"), + } + for i := range x.Items { + item := &x.Items[i] + item.SetGroupVersionKind(itemGVK) + } + + return nil default: return c.typedClient.List(ctx, obj, opts...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go index bf6ee882bb..b3493cb025 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -133,7 +133,6 @@ type resourceMeta struct { // isNamespaced returns true if the type is namespaced func (r *resourceMeta) isNamespaced() bool { return r.mapping.Scope.Name() != meta.RESTScopeNameRoot - } // resource returns the resource name of the type diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index ee190adb67..ded8a60d33 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -23,14 +23,17 @@ import ( "fmt" "strconv" "strings" + "sync" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilrand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/testing" @@ -45,11 +48,12 @@ type versionedTracker struct { } type fakeClient struct { - tracker versionedTracker - scheme *runtime.Scheme + tracker versionedTracker + scheme *runtime.Scheme + schemeWriteLock sync.Mutex } -var _ client.Client = &fakeClient{} +var _ client.WithWatch = &fakeClient{} const ( maxNameLength = 63 @@ -61,7 +65,7 @@ const ( // You can choose to initialize it with a slice of runtime.Object. // // Deprecated: Please use NewClientBuilder instead. -func NewFakeClient(initObjs ...runtime.Object) client.Client { +func NewFakeClient(initObjs ...runtime.Object) client.WithWatch { return NewClientBuilder().WithRuntimeObjects(initObjs...).Build() } @@ -70,7 +74,7 @@ func NewFakeClient(initObjs ...runtime.Object) client.Client { // You can choose to initialize it with a slice of runtime.Object. // // Deprecated: Please use NewClientBuilder instead. -func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client { +func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.WithWatch { return NewClientBuilder().WithScheme(clientScheme).WithRuntimeObjects(initObjs...).Build() } @@ -113,7 +117,7 @@ func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *C } // Build builds and returns a new fake client. 
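// An illustrative sketch of the metadata-only client behaviour changed above:
// Get and List on PartialObjectMetadata(List) now preserve the GVK set by the
// caller and stamp the item kind on every list entry. Listing ConfigMaps here
// is just an arbitrary example.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func listConfigMapMetadata(ctx context.Context, c client.Client) (*metav1.PartialObjectMetadataList, error) {
	list := &metav1.PartialObjectMetadataList{}
	// The caller declares the kind to list; after List returns, the list keeps
	// this GVK and each item carries Kind "ConfigMap".
	list.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMapList"})
	if err := c.List(ctx, list); err != nil {
		return nil, err
	}
	return list, nil
}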
-func (f *ClientBuilder) Build() client.Client { +func (f *ClientBuilder) Build() client.WithWatch { if f.scheme == nil { f.scheme = scheme.Scheme } @@ -284,6 +288,23 @@ func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.O return err } +func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return nil, err + } + + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return c.tracker.Watch(gvr, listOpts.Namespace) +} + func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { @@ -296,6 +317,14 @@ func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...cl gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } + if _, isUnstructuredList := obj.(*unstructured.UnstructuredList); isUnstructuredList && !c.scheme.Recognizes(gvk) { + // We need tor register the ListKind with UnstructuredList: + // https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51 + c.schemeWriteLock.Lock() + c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{}) + c.schemeWriteLock.Unlock() + } + listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go index 6587a19407..c0fc72c5b7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -104,6 +104,8 @@ func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, op } patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) if err != nil { return err diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index 5ed8baca9b..cedcfb5961 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -57,6 +57,7 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper { // isNamespaced returns true if the object is namespace scoped. // For unstructured objects the gvk is found from the object itself. +// TODO: this is repetitive code. Remove this and use ojectutil.IsNamespaced. 
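// An illustrative sketch of the fake client's new client.WithWatch support:
// tests can now open a watch against the fake tracker and receive events for
// objects mutated through the same client.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func watchPodsInTest(ctx context.Context) error {
	c := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build() // now a client.WithWatch
	w, err := c.Watch(ctx, &corev1.PodList{})
	if err != nil {
		return err
	}
	defer w.Stop()
	// Create/Update/Delete calls on c are delivered on w.ResultChan().
	return nil
}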
func isNamespaced(c Client, obj runtime.Object) (bool, error) { var gvk schema.GroupVersionKind var err error diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index 07bf8e632d..f77d52052d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -309,6 +309,20 @@ func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f Mu if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) { // Only issue a Status Patch if the resource has a status and the beforeStatus // and afterStatus copies differ + if result == OperationResultUpdated { + // If Status was replaced by Patch before, set it to afterStatus + objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return result, err + } + if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil { + return result, err + } + // If Status was replaced by Patch before, restore patched structure to the obj + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil { + return result, err + } + } if err := c.Status().Patch(ctx, obj, statusPatch); err != nil { return result, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go similarity index 58% rename from vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go index 8513846e2c..baa62f0bfa 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -17,9 +17,15 @@ limitations under the License. package objectutil import ( + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) // FilterWithLabels returns a copy of the items in objs matching labelSel @@ -40,3 +46,28 @@ func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtim } return outItems, nil } + +// IsAPINamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("Scope cannot be identified. 
Empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 57f95ba5b3..c16a5bb5f3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -67,6 +67,7 @@ type controllerManager struct { // leaderElectionRunnables is the set of Controllers that the controllerManager injects deps into and Starts. // These Runnables are managed by lead election. leaderElectionRunnables []Runnable + // nonLeaderElectionRunnables is the set of webhook servers that the controllerManager injects deps into and Starts. // These Runnables will not be blocked by lead election. nonLeaderElectionRunnables []Runnable @@ -145,6 +146,9 @@ type controllerManager struct { certDir string webhookServer *webhook.Server + // webhookServerOnce will be called in GetWebhookServer() to optionally initialize + // webhookServer if unset, and Add() it to controllerManager. + webhookServerOnce sync.Once // leaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. @@ -222,6 +226,9 @@ func (cm *controllerManager) Add(r Runnable) error { // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. func (cm *controllerManager) SetFields(i interface{}) error { + if err := cm.cluster.SetFields(i); err != nil { + return err + } if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { return err } @@ -231,9 +238,6 @@ func (cm *controllerManager) SetFields(i interface{}) error { if _, err := inject.LoggerInto(cm.logger, i); err != nil { return err } - if err := cm.cluster.SetFields(i); err != nil { - return err - } return nil } @@ -332,32 +336,19 @@ func (cm *controllerManager) GetAPIReader() client.Reader { } func (cm *controllerManager) GetWebhookServer() *webhook.Server { - server, wasNew := func() (*webhook.Server, bool) { - cm.mu.Lock() - defer cm.mu.Unlock() - - if cm.webhookServer != nil { - return cm.webhookServer, false - } - - cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, - CertDir: cm.certDir, + cm.webhookServerOnce.Do(func() { + if cm.webhookServer == nil { + cm.webhookServer = &webhook.Server{ + Port: cm.port, + Host: cm.host, + CertDir: cm.certDir, + } } - return cm.webhookServer, true - }() - - // only add the server if *we ourselves* just registered it. - // Add has its own lock, so just do this separately -- there shouldn't - // be a "race" in this lock gap because the condition is the population - // of cm.webhookServer, not anything to do with Add. - if wasNew { - if err := cm.Add(server); err != nil { + if err := cm.Add(cm.webhookServer); err != nil { panic("unable to add webhook server to the controller manager") } - } - return server + }) + return cm.webhookServer } func (cm *controllerManager) GetLogger() logr.Logger { @@ -587,10 +578,27 @@ func (cm *controllerManager) startNonLeaderElectionRunnables() { cm.mu.Lock() defer cm.mu.Unlock() + // First start any webhook servers, which includes conversion, validation, and defaulting + // webhooks that are registered. + // + // WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition + // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks + // to never start because no cache can be populated. 
+ for _, c := range cm.nonLeaderElectionRunnables { + if _, ok := c.(*webhook.Server); ok { + cm.startRunnable(c) + } + } + + // Start and wait for caches. cm.waitForCache(cm.internalCtx) // Start the non-leaderelection Runnables after the cache has synced for _, c := range cm.nonLeaderElectionRunnables { + if _, ok := c.(*webhook.Server); ok { + continue + } + // Controllers block, but we want to return an error if any have an error starting. // Write any Start errors to a channel so we can return them cm.startRunnable(c) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 30ff9e2516..843919427d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -209,17 +209,24 @@ type Options struct { LivenessEndpointName string // Port is the port that the webhook server serves at. - // It is used to set webhook.Server.Port. + // It is used to set webhook.Server.Port if WebhookServer is not set. Port int // Host is the hostname that the webhook server binds to. - // It is used to set webhook.Server.Host. + // It is used to set webhook.Server.Host if WebhookServer is not set. Host string // CertDir is the directory that contains the server key and certificate. - // if not set, webhook server would look up the server key and certificate in + // If not set, webhook server would look up the server key and certificate in // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate // must be named tls.key and tls.crt, respectively. + // It is used to set webhook.Server.CertDir if WebhookServer is not set. CertDir string + + // WebhookServer is an externally configured webhook.Server. By default, + // a Manager will create a default server using Port, Host, and CertDir; + // if this is set, the Manager will use this server instead. + WebhookServer *webhook.Server + // Functions to all for a user to customize the values that will be injected. 
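// An illustrative sketch of the new Options.WebhookServer field: a caller can
// hand the manager a pre-configured webhook.Server instead of having one built
// from Port/Host/CertDir. The port and certificate directory are hypothetical.
package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newManagerWithCustomWebhookServer() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		WebhookServer: &webhook.Server{
			Port:    9444,
			CertDir: "/etc/webhook-certs",
		},
	})
}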
// NewCache is the function that will create the cache to be used @@ -359,26 +366,28 @@ func New(config *rest.Config, options Options) (Manager, error) { } return &controllerManager{ - cluster: cluster, - recorderProvider: recorderProvider, - resourceLock: resourceLock, - metricsListener: metricsListener, - metricsExtraHandlers: metricsExtraHandlers, - controllerOptions: options.Controller, - logger: options.Logger, - elected: make(chan struct{}), - port: options.Port, - host: options.Host, - certDir: options.CertDir, - leaseDuration: *options.LeaseDuration, - renewDeadline: *options.RenewDeadline, - retryPeriod: *options.RetryPeriod, - healthProbeListener: healthProbeListener, - readinessEndpointName: options.ReadinessEndpointName, - livenessEndpointName: options.LivenessEndpointName, - gracefulShutdownTimeout: *options.GracefulShutdownTimeout, - internalProceduresStop: make(chan struct{}), - leaderElectionStopped: make(chan struct{}), + cluster: cluster, + recorderProvider: recorderProvider, + resourceLock: resourceLock, + metricsListener: metricsListener, + metricsExtraHandlers: metricsExtraHandlers, + controllerOptions: options.Controller, + logger: options.Logger, + elected: make(chan struct{}), + port: options.Port, + host: options.Host, + certDir: options.CertDir, + webhookServer: options.WebhookServer, + leaseDuration: *options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, + healthProbeListener: healthProbeListener, + readinessEndpointName: options.ReadinessEndpointName, + livenessEndpointName: options.LivenessEndpointName, + gracefulShutdownTimeout: *options.GracefulShutdownTimeout, + internalProceduresStop: make(chan struct{}), + leaderElectionStopped: make(chan struct{}), + leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel, }, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index e6179d3729..26900cf2eb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -77,6 +77,16 @@ func (hs multiMutating) InjectFunc(f inject.Func) error { return nil } +// InjectDecoder injects the decoder into the handlers. +func (hs multiMutating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} + // MultiMutatingHandler combines multiple mutating webhook handlers into a single // mutating webhook handler. Handlers are called in sequential order, and the first // `allowed: false` response may short-circuit the rest. Users must take care to @@ -125,3 +135,13 @@ func (hs multiValidating) InjectFunc(f inject.Func) error { return nil } + +// InjectDecoder injects the decoder into the handlers. 
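// An illustrative sketch of why the InjectDecoder pass-throughs above matter:
// handlers combined with MultiValidatingHandler (or MultiMutatingHandler) now
// each receive the admission decoder. The pod check below is a placeholder;
// the same handler type is registered twice only for brevity.
package example

import (
	"context"
	"net/http"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

type podValidator struct {
	decoder *admission.Decoder
}

// InjectDecoder is now called for every handler wrapped by the multi-handler.
func (v *podValidator) InjectDecoder(d *admission.Decoder) error {
	v.decoder = d
	return nil
}

func (v *podValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
	pod := &corev1.Pod{}
	if err := v.decoder.Decode(req, pod); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	return admission.Allowed("")
}

func combinedWebhook() *admission.Webhook {
	return admission.MultiValidatingHandler(&podValidator{}, &podValidator{})
}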
+func (hs multiValidating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index 4430c3132c..d8c7721501 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -27,8 +27,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/kubernetes/scheme" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) var ( @@ -110,6 +113,9 @@ func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { } // Webhook represents each individual webhook. +// +// It must be registered with a webhook.Server or +// populated by StandaloneWebhook to be ran on an arbitrary HTTP server. type Webhook struct { // Handler actually processes an admission request returning whether it was allowed or denied, // and potentially patches to apply to the handler. @@ -203,3 +209,47 @@ func (w *Webhook) InjectFunc(f inject.Func) error { return setFields(w.Handler) } + +// StandaloneOptions let you configure a StandaloneWebhook. +type StandaloneOptions struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + // Logger to be used by the webhook. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + // MetricsPath is used for labelling prometheus metrics + // by the path is served on. + // If none is set, prometheus metrics will not be generated. + MetricsPath string +} + +// StandaloneWebhook prepares a webhook for use without a webhook.Server, +// passing in the information normally populated by webhook.Server +// and instrumenting the webhook with metrics. +// +// Use this to attach your webhook to an arbitrary HTTP server or mux. +// +// Note that you are responsible for terminating TLS if you use StandaloneWebhook +// in your own server/mux. In order to be accessed by a kubernetes cluster, +// all webhook servers require TLS. +func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + if err := hook.InjectScheme(opts.Scheme); err != nil { + return nil, err + } + + if opts.Logger == nil { + opts.Logger = logf.RuntimeLog.WithName("webhook") + } + hook.log = opts.Logger + + if opts.MetricsPath == "" { + return hook, nil + } + return metrics.InstrumentedHook(opts.MetricsPath, hook), nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go index a29643b244..557004908b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go @@ -17,7 +17,10 @@ limitations under the License. 
package metrics import ( + "net/http" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" "sigs.k8s.io/controller-runtime/pkg/metrics" ) @@ -59,3 +62,24 @@ var ( func init() { metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight) } + +// InstrumentedHook adds some instrumentation on top of the given webhook. +func InstrumentedHook(path string, hookRaw http.Handler) http.Handler { + lbl := prometheus.Labels{"webhook": path} + + lat := RequestLatency.MustCurryWith(lbl) + cnt := RequestTotal.MustCurryWith(lbl) + gge := RequestInFlight.With(lbl) + + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + + return promhttp.InstrumentHandlerDuration( + lat, + promhttp.InstrumentHandlerCounter( + cnt, + promhttp.InstrumentHandlerInFlight(gge, hookRaw), + ), + ) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 9fefc9a697..cdd34c9660 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -29,8 +29,8 @@ import ( "strconv" "sync" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/apimachinery/pkg/runtime" + kscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" @@ -41,6 +41,12 @@ var DefaultPort = 9443 // Server is an admission webhook server that can serve traffic and // generates related k8s resources for deploying. +// +// TLS is required for a webhook to be accessed by kubernetes, so +// you must provide a CertName and KeyName or have valid cert/key +// at the default locations (tls.crt and tls.key). If you do not +// want to configure TLS (i.e for testing purposes) run an +// admission.StandaloneWebhook in your own server. type Server struct { // Host is the address that the server will listen on. // Defaults to "" - all addresses. @@ -124,7 +130,7 @@ func (s *Server) Register(path string, hook http.Handler) { } // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook - s.WebhookMux.Handle(path, instrumentedHook(path, hook)) + s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) regLog := log.WithValues("path", path) regLog.Info("registering webhook") @@ -149,25 +155,24 @@ func (s *Server) Register(path string, hook http.Handler) { } } -// instrumentedHook adds some instrumentation on top of the given webhook. -func instrumentedHook(path string, hookRaw http.Handler) http.Handler { - lbl := prometheus.Labels{"webhook": path} - - lat := metrics.RequestLatency.MustCurryWith(lbl) - cnt := metrics.RequestTotal.MustCurryWith(lbl) - gge := metrics.RequestInFlight.With(lbl) - - // Initialize the most likely HTTP status codes. - cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - - return promhttp.InstrumentHandlerDuration( - lat, - promhttp.InstrumentHandlerCounter( - cnt, - promhttp.InstrumentHandlerInFlight(gge, hookRaw), - ), - ) +// StartStandalone runs a webhook server without +// a controller manager. 
+func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { + // Use the Kubernetes client-go scheme if none is specified + if scheme == nil { + scheme = kscheme.Scheme + } + + if err := s.InjectFunc(func(i interface{}) error { + if _, err := inject.SchemeInto(scheme, i); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + return s.Start(ctx) } // Start runs the server.
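// An illustrative sketch of the two new ways to run admission webhooks without
// a controller manager: wrapping a single webhook for an arbitrary mux with
// StandaloneWebhook, or running a webhook.Server via StartStandalone. The
// "/validate-example" path and port are hypothetical; when serving through your
// own mux you are responsible for TLS termination.
package example

import (
	"context"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func standaloneHandler(hook *admission.Webhook) (http.Handler, error) {
	// Instruments the webhook with metrics labelled by MetricsPath and returns
	// an http.Handler to mount wherever the caller likes.
	return admission.StandaloneWebhook(hook, admission.StandaloneOptions{
		MetricsPath: "/validate-example",
	})
}

func runServerWithoutManager(ctx context.Context, hook *admission.Webhook) error {
	// Passing a nil scheme makes StartStandalone fall back to client-go's scheme.
	srv := &webhook.Server{Port: 9443}
	srv.Register("/validate-example", hook)
	return srv.StartStandalone(ctx, nil)
}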