diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 53e17a02ff..d0621e2fa0 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -6,6 +6,7 @@ import ( "time" "github.com/go-logr/logr" + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -98,21 +99,33 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.handleEvent(ctx, logger, event) } - changed, graph := h.cfg.processor.Process() - if !changed { + changeType, graph := h.cfg.processor.Process() + + var err error + switch changeType { + case state.NoChange: logger.Info("Handling events didn't result into NGINX configuration changes") if !h.cfg.healthChecker.ready && h.cfg.healthChecker.firstBatchError == nil { h.cfg.healthChecker.setAsReady() } return + case state.EndpointsOnlyChange: + h.cfg.version++ + err = h.updateUpstreamServers( + ctx, + logger, + dataplane.BuildConfiguration(ctx, graph, h.cfg.serviceResolver, h.cfg.version), + ) + case state.ClusterStateChange: + h.cfg.version++ + err = h.updateNginxConf( + ctx, + dataplane.BuildConfiguration(ctx, graph, h.cfg.serviceResolver, h.cfg.version), + ) } var nginxReloadRes nginxReloadResult - h.cfg.version++ - if err := h.updateNginx( - ctx, - dataplane.BuildConfiguration(ctx, graph, h.cfg.serviceResolver, h.cfg.version), - ); err != nil { + if err != nil { logger.Error(err, "Failed to update NGINX configuration") nginxReloadRes.error = err if !h.cfg.healthChecker.ready { @@ -174,9 +187,9 @@ func (h *eventHandlerImpl) handleEvent(ctx context.Context, logger logr.Logger, } } -func (h *eventHandlerImpl) updateNginx(ctx context.Context, conf dataplane.Configuration) error { +// updateNginxConf updates nginx conf files and reloads nginx +func (h *eventHandlerImpl) updateNginxConf(ctx context.Context, conf dataplane.Configuration) error { files := h.cfg.generator.Generate(conf) - 
if err := h.cfg.nginxFileMgr.ReplaceFiles(files); err != nil { return fmt.Errorf("failed to replace NGINX configuration files: %w", err) } @@ -188,6 +201,93 @@ func (h *eventHandlerImpl) updateNginx(ctx context.Context, conf dataplane.Confi return nil } +// updateUpstreamServers is called only when endpoints have changed. It updates nginx conf files and then: +// - if using NGINX Plus, determines which servers have changed and uses the N+ API to update them; +// - otherwise if not using NGINX Plus, or an error was returned from the API, reloads nginx +func (h *eventHandlerImpl) updateUpstreamServers( + ctx context.Context, + logger logr.Logger, + conf dataplane.Configuration, +) error { + isPlus := h.cfg.nginxRuntimeMgr.IsPlus() + + files := h.cfg.generator.Generate(conf) + if err := h.cfg.nginxFileMgr.ReplaceFiles(files); err != nil { + return fmt.Errorf("failed to replace NGINX configuration files: %w", err) + } + + reload := func() error { + if err := h.cfg.nginxRuntimeMgr.Reload(ctx, conf.Version); err != nil { + return fmt.Errorf("failed to reload NGINX: %w", err) + } + + return nil + } + + if isPlus { + type upstream struct { + name string + servers []ngxclient.UpstreamServer + } + var upstreams []upstream + + prevUpstreams, err := h.cfg.nginxRuntimeMgr.GetUpstreams() + if err != nil { + logger.Error(err, "failed to get upstreams from API, reloading configuration instead") + return reload() + } + + for _, u := range conf.Upstreams { + upstream := upstream{ + name: u.Name, + servers: ngxConfig.ConvertEndpoints(u.Endpoints), + } + + if u, ok := prevUpstreams[upstream.name]; ok { + if !serversEqual(upstream.servers, u.Peers) { + upstreams = append(upstreams, upstream) + } + } + } + + var reloadPlus bool + for _, upstream := range upstreams { + if err := h.cfg.nginxRuntimeMgr.UpdateHTTPServers(upstream.name, upstream.servers); err != nil { + logger.Error( + err, "couldn't update upstream via the API, reloading configuration instead", + "upstreamName", 
upstream.name, + ) + reloadPlus = true + } + } + + if !reloadPlus { + return nil + } + } + + return reload() +} + +func serversEqual(newServers []ngxclient.UpstreamServer, oldServers []ngxclient.Peer) bool { + if len(newServers) != len(oldServers) { + return false + } + + diff := make(map[string]struct{}, len(newServers)) + for _, s := range newServers { + diff[s.Server] = struct{}{} + } + + for _, s := range oldServers { + if _, ok := diff[s.Server]; !ok { + return false + } + } + + return true +} + // updateControlPlaneAndSetStatus updates the control plane configuration and then sets the status // based on the outcome func (h *eventHandlerImpl) updateControlPlaneAndSetStatus( diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 61ad16a86b..9adbf6aeca 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -4,10 +4,12 @@ import ( "context" "errors" + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "go.uber.org/zap" v1 "k8s.io/api/core/v1" + discoveryV1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -27,6 +29,7 @@ import ( "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/nginx/runtime/runtimefakes" + "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state" staticConds "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state/conditions" "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -64,6 +67,7 @@ var _ = Describe("eventHandler", func() { BeforeEach(func() { fakeProcessor = &statefakes.FakeChangeProcessor{} + fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) fakeGenerator = &configfakes.FakeGenerator{} fakeNginxFileMgr = &filefakes.FakeManager{} fakeNginxRuntimeMgr = &runtimefakes.FakeManager{} @@ -112,7 +116,7 @@ var _ = Describe("eventHandler", func() { } BeforeEach(func() { - fakeProcessor.ProcessReturns(true /* changed */, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.ClusterStateChange /* changed */, &graph.Graph{}) fakeGenerator.GenerateReturns(fakeCfgFiles) }) @@ -280,11 +284,129 @@ var _ = Describe("eventHandler", func() { }) }) + When("receiving an EndpointsOnlyChange update", func() { + e := &events.UpsertEvent{Resource: &discoveryV1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-gateway", + Namespace: "nginx-gateway", + }, + }} + batch := []interface{}{e} + + BeforeEach(func() { + fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{}) + upstreams := ngxclient.Upstreams{ + "one": ngxclient.Upstream{ + Peers: []ngxclient.Peer{ + {Server: "server1"}, + }, + }, + } + 
fakeNginxRuntimeMgr.GetUpstreamsReturns(upstreams, nil) + }) + + When("running NGINX Plus", func() { + It("should call the NGINX Plus API", func() { + fakeNginxRuntimeMgr.IsPlusReturns(true) + + handler.HandleEventBatch(context.Background(), ctlrZap.New(), batch) + Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) + Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(1)) + Expect(fakeNginxRuntimeMgr.GetUpstreamsCallCount()).To(Equal(1)) + }) + }) + + When("not running NGINX Plus", func() { + It("should not call the NGINX Plus API", func() { + handler.HandleEventBatch(context.Background(), ctlrZap.New(), batch) + Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) + Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(1)) + Expect(fakeNginxRuntimeMgr.GetUpstreamsCallCount()).To(Equal(0)) + Expect(fakeNginxRuntimeMgr.ReloadCallCount()).To(Equal(1)) + }) + }) + }) + + When("updating upstream servers", func() { + conf := dataplane.Configuration{ + Upstreams: []dataplane.Upstream{ + { + Name: "one", + }, + }, + } + + type callCounts struct { + generate int + update int + reload int + } + + assertCallCounts := func(cc callCounts) { + Expect(fakeGenerator.GenerateCallCount()).To(Equal(cc.generate)) + Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(cc.generate)) + Expect(fakeNginxRuntimeMgr.UpdateHTTPServersCallCount()).To(Equal(cc.update)) + Expect(fakeNginxRuntimeMgr.ReloadCallCount()).To(Equal(cc.reload)) + } + + BeforeEach(func() { + upstreams := ngxclient.Upstreams{ + "one": ngxclient.Upstream{ + Peers: []ngxclient.Peer{ + {Server: "server1"}, + }, + }, + } + fakeNginxRuntimeMgr.GetUpstreamsReturns(upstreams, nil) + }) + + When("running NGINX Plus", func() { + BeforeEach(func() { + fakeNginxRuntimeMgr.IsPlusReturns(true) + }) + + It("should update servers using the NGINX Plus API", func() { + Expect(handler.updateUpstreamServers(context.Background(), ctlrZap.New(), conf)).To(Succeed()) + + assertCallCounts(callCounts{generate: 1, update: 
1, reload: 0}) + }) + + It("should reload when GET API returns an error", func() { + fakeNginxRuntimeMgr.GetUpstreamsReturns(nil, errors.New("error")) + Expect(handler.updateUpstreamServers(context.Background(), ctlrZap.New(), conf)).To(Succeed()) + + assertCallCounts(callCounts{generate: 1, update: 0, reload: 1}) + }) + + It("should reload when POST API returns an error", func() { + fakeNginxRuntimeMgr.UpdateHTTPServersReturns(errors.New("error")) + Expect(handler.updateUpstreamServers(context.Background(), ctlrZap.New(), conf)).To(Succeed()) + + assertCallCounts(callCounts{generate: 1, update: 1, reload: 1}) + }) + }) + + When("not running NGINX Plus", func() { + It("should update servers by reloading", func() { + Expect(handler.updateUpstreamServers(context.Background(), ctlrZap.New(), conf)).To(Succeed()) + + assertCallCounts(callCounts{generate: 1, update: 0, reload: 1}) + }) + + It("should return an error when reloading fails", func() { + fakeNginxRuntimeMgr.ReloadReturns(errors.New("error")) + Expect(handler.updateUpstreamServers(context.Background(), ctlrZap.New(), conf)).ToNot(Succeed()) + + assertCallCounts(callCounts{generate: 1, update: 0, reload: 1}) + }) + }) + }) + It("should set the health checker status properly when there are changes", func() { e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} - fakeProcessor.ProcessReturns(true, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) Expect(handler.cfg.healthChecker.readyCheck(nil)).ToNot(Succeed()) handler.HandleEventBatch(context.Background(), ctlrZap.New(), batch) @@ -304,7 +426,7 @@ var _ = Describe("eventHandler", func() { e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} - fakeProcessor.ProcessReturns(true, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) fakeNginxRuntimeMgr.ReloadReturns(errors.New("reload error")) 
handler.HandleEventBatch(context.Background(), ctlrZap.New(), batch) @@ -312,14 +434,14 @@ var _ = Describe("eventHandler", func() { Expect(handler.cfg.healthChecker.readyCheck(nil)).ToNot(Succeed()) // now send an update with no changes; should still return an error - fakeProcessor.ProcessReturns(false, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) handler.HandleEventBatch(context.Background(), ctlrZap.New(), batch) Expect(handler.cfg.healthChecker.readyCheck(nil)).ToNot(Succeed()) // error goes away - fakeProcessor.ProcessReturns(true, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) fakeNginxRuntimeMgr.ReloadReturns(nil) handler.HandleEventBatch(context.Background(), ctlrZap.New(), batch) @@ -339,6 +461,46 @@ var _ = Describe("eventHandler", func() { }) }) +var _ = Describe("serversEqual", func() { + DescribeTable("determines if server lists are equal", + func(newServers []ngxclient.UpstreamServer, oldServers []ngxclient.Peer, equal bool) { + Expect(serversEqual(newServers, oldServers)).To(Equal(equal)) + }, + Entry("different length", + []ngxclient.UpstreamServer{ + {Server: "server1"}, + }, + []ngxclient.Peer{ + {Server: "server1"}, + {Server: "server2"}, + }, + false, + ), + Entry("differing elements", + []ngxclient.UpstreamServer{ + {Server: "server1"}, + {Server: "server2"}, + }, + []ngxclient.Peer{ + {Server: "server1"}, + {Server: "server3"}, + }, + false, + ), + Entry("same elements", + []ngxclient.UpstreamServer{ + {Server: "server1"}, + {Server: "server2"}, + }, + []ngxclient.Peer{ + {Server: "server1"}, + {Server: "server2"}, + }, + true, + ), + ) +}) + var _ = Describe("getGatewayAddresses", func() { It("gets gateway addresses from a Service", func() { fakeClient := fake.NewFakeClient() diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 46272570c5..e3bd95ff8f 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ 
-6,6 +6,7 @@ import ( "time" "github.com/go-logr/logr" + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" "github.com/prometheus/client_golang/prometheus" apiv1 "k8s.io/api/core/v1" discoveryV1 "k8s.io/api/discovery/v1" @@ -68,48 +69,12 @@ func init() { // nolint:gocyclo func StartManager(cfg config.Config) error { - options := manager.Options{ - Scheme: scheme, - Logger: cfg.Logger, - Metrics: getMetricsOptions(cfg.MetricsConfig), - // Note: when the leadership is lost, the manager will return an error in the Start() method. - // However, it will not wait for any Runnable it starts to finish, meaning any in-progress operations - // might get terminated half-way. - LeaderElection: true, - LeaderElectionNamespace: cfg.GatewayPodConfig.Namespace, - LeaderElectionID: cfg.LeaderElection.LockName, - // We're not enabling LeaderElectionReleaseOnCancel because when the Manager stops gracefully, it waits - // for all started Runnables (including Leader-only ones) to finish. Otherwise, the new leader might start - // running Leader-only Runnables before the old leader has finished running them. - // See the doc comment for the LeaderElectionReleaseOnCancel for more details. 
- LeaderElectionReleaseOnCancel: false, - Controller: ctrlcfg.Controller{ - // All of our controllers still need to work in case of non-leader pods - NeedLeaderElection: helpers.GetPointer(false), - }, - } - - if cfg.HealthConfig.Enabled { - options.HealthProbeBindAddress = fmt.Sprintf(":%d", cfg.HealthConfig.Port) - } - - eventCh := make(chan interface{}) - - clusterCfg := ctlr.GetConfigOrDie() - clusterCfg.Timeout = clusterTimeout - - mgr, err := manager.New(clusterCfg, options) + hc := &healthChecker{} + mgr, err := createManager(cfg, hc) if err != nil { return fmt.Errorf("cannot build runtime manager: %w", err) } - hc := &healthChecker{} - if cfg.HealthConfig.Enabled { - if err := mgr.AddReadyzCheck("readyz", hc.readyCheck); err != nil { - return fmt.Errorf("error adding ready check: %w", err) - } - } - recorderName := fmt.Sprintf("nginx-gateway-fabric-%s", cfg.GatewayClassName) recorder := mgr.GetEventRecorderFor(recorderName) @@ -122,6 +87,7 @@ func StartManager(cfg config.Config) error { ctx := ctlr.SetupSignalHandler() + eventCh := make(chan interface{}) controlConfigNSName := types.NamespacedName{ Namespace: cfg.GatewayPodConfig.Namespace, Name: cfg.ConfigName, @@ -165,15 +131,22 @@ func StartManager(cfg config.Config) error { var ( ngxruntimeCollector ngxruntime.MetricsCollector = collectors.NewManagerNoopCollector() - // nolint:ineffassign // not an ineffectual assignment. Will be used if metrics are disabled. 
- handlerCollector handlerMetricsCollector = collectors.NewControllerNoopCollector() + handlerCollector handlerMetricsCollector = collectors.NewControllerNoopCollector() ) + var ngxPlusClient *ngxclient.NginxClient + if cfg.Plus { + ngxPlusClient, err = ngxruntime.CreatePlusClient() + if err != nil { + return fmt.Errorf("error creating NGINX plus client: %w", err) + } + } + if cfg.MetricsConfig.Enabled { constLabels := map[string]string{"class": cfg.GatewayClassName} var ngxCollector prometheus.Collector if cfg.Plus { - ngxCollector, err = collectors.NewNginxPlusMetricsCollector(constLabels, promLogger) + ngxCollector, err = collectors.NewNginxPlusMetricsCollector(ngxPlusClient, constLabels, promLogger) } else { ngxCollector = collectors.NewNginxMetricsCollector(constLabels, promLogger) } @@ -204,13 +177,17 @@ func StartManager(cfg config.Config) error { k8sClient: mgr.GetClient(), processor: processor, serviceResolver: resolver.NewServiceResolverImpl(mgr.GetClient()), - generator: ngxcfg.NewGeneratorImpl(), + generator: ngxcfg.NewGeneratorImpl(cfg.Plus), logLevelSetter: logLevelSetter, nginxFileMgr: file.NewManagerImpl( cfg.Logger.WithName("nginxFileManager"), file.NewStdLibOSFileManager(), ), - nginxRuntimeMgr: ngxruntime.NewManagerImpl(ngxruntimeCollector), + nginxRuntimeMgr: ngxruntime.NewManagerImpl( + ngxPlusClient, + ngxruntimeCollector, + cfg.Logger.WithName("nginxRuntimeManager"), + ), statusUpdater: statusUpdater, eventRecorder: recorder, healthChecker: hc, @@ -254,6 +231,49 @@ func StartManager(cfg config.Config) error { return mgr.Start(ctx) } +func createManager(cfg config.Config, hc *healthChecker) (manager.Manager, error) { + options := manager.Options{ + Scheme: scheme, + Logger: cfg.Logger, + Metrics: getMetricsOptions(cfg.MetricsConfig), + // Note: when the leadership is lost, the manager will return an error in the Start() method. 
+ // However, it will not wait for any Runnable it starts to finish, meaning any in-progress operations + // might get terminated half-way. + LeaderElection: true, + LeaderElectionNamespace: cfg.GatewayPodConfig.Namespace, + LeaderElectionID: cfg.LeaderElection.LockName, + // We're not enabling LeaderElectionReleaseOnCancel because when the Manager stops gracefully, it waits + // for all started Runnables (including Leader-only ones) to finish. Otherwise, the new leader might start + // running Leader-only Runnables before the old leader has finished running them. + // See the doc comment for the LeaderElectionReleaseOnCancel for more details. + LeaderElectionReleaseOnCancel: false, + Controller: ctrlcfg.Controller{ + // All of our controllers still need to work in case of non-leader pods + NeedLeaderElection: helpers.GetPointer(false), + }, + } + + if cfg.HealthConfig.Enabled { + options.HealthProbeBindAddress = fmt.Sprintf(":%d", cfg.HealthConfig.Port) + } + + clusterCfg := ctlr.GetConfigOrDie() + clusterCfg.Timeout = clusterTimeout + + mgr, err := manager.New(clusterCfg, options) + if err != nil { + return nil, err + } + + if cfg.HealthConfig.Enabled { + if err := mgr.AddReadyzCheck("readyz", hc.readyCheck); err != nil { + return nil, fmt.Errorf("error adding ready check: %w", err) + } + } + + return mgr, nil +} + func registerControllers( ctx context.Context, cfg config.Config, diff --git a/internal/mode/static/metrics/collectors/nginx.go b/internal/mode/static/metrics/collectors/nginx.go index b68123db29..87d8039e22 100644 --- a/internal/mode/static/metrics/collectors/nginx.go +++ b/internal/mode/static/metrics/collectors/nginx.go @@ -1,11 +1,6 @@ package collectors import ( - "context" - "fmt" - "net" - "net/http" - "github.com/go-kit/log" "github.com/nginxinc/nginx-plus-go-client/client" prometheusClient "github.com/nginxinc/nginx-prometheus-exporter/client" @@ -13,30 +8,28 @@ import ( "github.com/prometheus/client_golang/prometheus" 
"github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/metrics" + "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/nginx/runtime" ) const ( - nginxStatusSock = "/var/run/nginx/nginx-status.sock" - nginxStatusURI = "http://config-status/stub_status" - nginxPlusAPISock = "/var/run/nginx/nginx-plus-api.sock" - nginxPlusAPIURI = "http://nginx-plus-api/api" + nginxStatusSock = "/var/run/nginx/nginx-status.sock" + nginxStatusURI = "http://config-status/stub_status" ) // NewNginxMetricsCollector creates an NginxCollector which fetches stats from NGINX over a unix socket func NewNginxMetricsCollector(constLabels map[string]string, logger log.Logger) prometheus.Collector { - httpClient := getSocketClient(nginxStatusSock) + httpClient := runtime.GetSocketClient(nginxStatusSock) ngxClient := prometheusClient.NewNginxClient(&httpClient, nginxStatusURI) return nginxCollector.NewNginxCollector(ngxClient, metrics.Namespace, constLabels, logger) } // NewNginxPlusMetricsCollector creates an NginxCollector which fetches stats from NGINX Plus API over a unix socket -func NewNginxPlusMetricsCollector(constLabels map[string]string, logger log.Logger) (prometheus.Collector, error) { - plusClient, err := createPlusClient() - if err != nil { - return nil, err - } - +func NewNginxPlusMetricsCollector( + plusClient *client.NginxClient, + constLabels map[string]string, + logger log.Logger, +) (prometheus.Collector, error) { collector := nginxCollector.NewNginxPlusCollector( plusClient, metrics.Namespace, @@ -47,26 +40,3 @@ func NewNginxPlusMetricsCollector(constLabels map[string]string, logger log.Logg return collector, nil } - -// getSocketClient gets an http.Client with a unix socket transport. 
-func getSocketClient(sockPath string) http.Client { - return http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", sockPath) - }, - }, - } -} - -func createPlusClient() (*client.NginxClient, error) { - var plusClient *client.NginxClient - var err error - - httpClient := getSocketClient(nginxPlusAPISock) - plusClient, err = client.NewNginxClient(nginxPlusAPIURI, client.WithHTTPClient(&httpClient)) - if err != nil { - return nil, fmt.Errorf("failed to create NginxClient for Plus: %w", err) - } - return plusClient, nil -} diff --git a/internal/mode/static/nginx/conf/nginx-plus.conf b/internal/mode/static/nginx/conf/nginx-plus.conf index 3ad03f2de6..56924b0069 100644 --- a/internal/mode/static/nginx/conf/nginx-plus.conf +++ b/internal/mode/static/nginx/conf/nginx-plus.conf @@ -46,7 +46,7 @@ http { access_log off; location /api { - api write=off; + api write=on; } } } diff --git a/internal/mode/static/nginx/config/convert.go b/internal/mode/static/nginx/config/convert.go new file mode 100644 index 0000000000..d8435a0e22 --- /dev/null +++ b/internal/mode/static/nginx/config/convert.go @@ -0,0 +1,29 @@ +package config + +import ( + "fmt" + + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" + + "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state/resolver" +) + +// ConvertEndpoints converts a list of Endpoints into a list of NGINX Plus SDK UpstreamServers. 
+func ConvertEndpoints(eps []resolver.Endpoint) []ngxclient.UpstreamServer { + servers := make([]ngxclient.UpstreamServer, 0, len(eps)) + + for _, ep := range eps { + var port string + if ep.Port != 0 { + port = fmt.Sprintf(":%d", ep.Port) + } + + server := ngxclient.UpstreamServer{ + Server: fmt.Sprintf("%s%s", ep.Address, port), + } + + servers = append(servers, server) + } + + return servers +} diff --git a/internal/mode/static/nginx/config/convert_test.go b/internal/mode/static/nginx/config/convert_test.go new file mode 100644 index 0000000000..c755f2c3c9 --- /dev/null +++ b/internal/mode/static/nginx/config/convert_test.go @@ -0,0 +1,35 @@ +package config + +import ( + "testing" + + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" + . "github.com/onsi/gomega" + + "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state/resolver" +) + +func TestConvertEndpoints(t *testing.T) { + endpoints := []resolver.Endpoint{ + { + Address: "1.2.3.4", + Port: 80, + }, + { + Address: "5.6.7.8", + Port: 0, + }, + } + + expUpstreams := []ngxclient.UpstreamServer{ + { + Server: "1.2.3.4:80", + }, + { + Server: "5.6.7.8", + }, + } + + g := NewWithT(t) + g.Expect(ConvertEndpoints(endpoints)).To(Equal(expUpstreams)) +} diff --git a/internal/mode/static/nginx/config/generator.go b/internal/mode/static/nginx/config/generator.go index 77cb2220d0..ecbb5cece2 100644 --- a/internal/mode/static/nginx/config/generator.go +++ b/internal/mode/static/nginx/config/generator.go @@ -43,11 +43,13 @@ type Generator interface { // // It also expects that the main NGINX configuration file nginx.conf is located in configFolder and nginx.conf // includes (https://nginx.org/en/docs/ngx_core_module.html#include) the files from httpFolder. -type GeneratorImpl struct{} +type GeneratorImpl struct { + plus bool +} // NewGeneratorImpl creates a new GeneratorImpl. 
-func NewGeneratorImpl() GeneratorImpl { - return GeneratorImpl{} +func NewGeneratorImpl(plus bool) GeneratorImpl { + return GeneratorImpl{plus: plus} } // executeFunc is a function that generates NGINX configuration from internal representation. @@ -64,7 +66,7 @@ func (g GeneratorImpl) Generate(conf dataplane.Configuration) []file.File { files = append(files, generatePEM(id, pair.Cert, pair.Key)) } - files = append(files, generateHTTPConfig(conf)) + files = append(files, g.generateHTTPConfig(conf)) files = append(files, generateConfigVersion(conf.Version)) @@ -88,9 +90,9 @@ func generatePEMFileName(id dataplane.SSLKeyPairID) string { return filepath.Join(secretsFolder, string(id)+".pem") } -func generateHTTPConfig(conf dataplane.Configuration) file.File { +func (g GeneratorImpl) generateHTTPConfig(conf dataplane.Configuration) file.File { var c []byte - for _, execute := range getExecuteFuncs() { + for _, execute := range g.getExecuteFuncs() { c = append(c, execute(conf)...) } @@ -101,9 +103,9 @@ func generateHTTPConfig(conf dataplane.Configuration) file.File { } } -func getExecuteFuncs() []executeFunc { +func (g GeneratorImpl) getExecuteFuncs() []executeFunc { return []executeFunc{ - executeUpstreams, + g.executeUpstreams, executeSplitClients, executeServers, executeMaps, diff --git a/internal/mode/static/nginx/config/generator_test.go b/internal/mode/static/nginx/config/generator_test.go index 34cd858d05..c42d44cb49 100644 --- a/internal/mode/static/nginx/config/generator_test.go +++ b/internal/mode/static/nginx/config/generator_test.go @@ -62,7 +62,8 @@ func TestGenerate(t *testing.T) { } g := NewWithT(t) - generator := config.NewGeneratorImpl() + var plus bool + generator := config.NewGeneratorImpl(plus) files := generator.Generate(conf) diff --git a/internal/mode/static/nginx/config/http/config.go b/internal/mode/static/nginx/config/http/config.go index 59ce01d3f6..ae0d53546e 100644 --- a/internal/mode/static/nginx/config/http/config.go +++ 
b/internal/mode/static/nginx/config/http/config.go @@ -52,8 +52,9 @@ const ( // Upstream holds all configuration for an HTTP upstream. type Upstream struct { - Name string - Servers []UpstreamServer + Name string + ZoneSize string // format: 512k, 1m + Servers []UpstreamServer } // UpstreamServer holds all configuration for an HTTP upstream server. diff --git a/internal/mode/static/nginx/config/servers_template.go b/internal/mode/static/nginx/config/servers_template.go index 9afa4cdf04..a56c55820e 100644 --- a/internal/mode/static/nginx/config/servers_template.go +++ b/internal/mode/static/nginx/config/servers_template.go @@ -37,14 +37,14 @@ server { rewrite {{ $r }}; {{- end }} - {{- if $l.Return -}} + {{- if $l.Return }} return {{ $l.Return.Code }} "{{ $l.Return.Body }}"; - {{ end }} + {{- end }} - {{- if $l.HTTPMatchVar -}} + {{- if $l.HTTPMatchVar }} set $http_matches {{ $l.HTTPMatchVar | printf "%q" }}; js_content httpmatches.redirect; - {{ end }} + {{- end }} {{- if $l.ProxyPass -}} {{ range $h := $l.ProxySetHeaders }} diff --git a/internal/mode/static/nginx/config/upstreams.go b/internal/mode/static/nginx/config/upstreams.go index 75f36e3ae9..5d941890f3 100644 --- a/internal/mode/static/nginx/config/upstreams.go +++ b/internal/mode/static/nginx/config/upstreams.go @@ -17,20 +17,26 @@ const ( nginx500Server = "unix:/var/lib/nginx/nginx-500-server.sock" // invalidBackendRef is used as an upstream name for invalid backend references. invalidBackendRef = "invalid-backend-ref" + // ossZoneSize is the upstream zone size for nginx open source. + ossZoneSize = "512k" + // plusZoneSize is the upstream zone size for nginx plus. + plusZoneSize = "1m" + // invalidBackendZoneSize is the upstream zone size for the invalid backend upstream. 
+ invalidBackendZoneSize = "32k" ) -func executeUpstreams(conf dataplane.Configuration) []byte { - upstreams := createUpstreams(conf.Upstreams) +func (g GeneratorImpl) executeUpstreams(conf dataplane.Configuration) []byte { + upstreams := g.createUpstreams(conf.Upstreams) return execute(upstreamsTemplate, upstreams) } -func createUpstreams(upstreams []dataplane.Upstream) []http.Upstream { +func (g GeneratorImpl) createUpstreams(upstreams []dataplane.Upstream) []http.Upstream { // capacity is the number of upstreams + 1 for the invalid backend ref upstream ups := make([]http.Upstream, 0, len(upstreams)+1) for _, u := range upstreams { - ups = append(ups, createUpstream(u)) + ups = append(ups, g.createUpstream(u)) } ups = append(ups, createInvalidBackendRefUpstream()) @@ -38,10 +44,16 @@ func createUpstreams(upstreams []dataplane.Upstream) []http.Upstream { return ups } -func createUpstream(up dataplane.Upstream) http.Upstream { +func (g GeneratorImpl) createUpstream(up dataplane.Upstream) http.Upstream { + zoneSize := ossZoneSize + if g.plus { + zoneSize = plusZoneSize + } + if len(up.Endpoints) == 0 { return http.Upstream{ - Name: up.Name, + Name: up.Name, + ZoneSize: zoneSize, Servers: []http.UpstreamServer{ { Address: nginx502Server, @@ -58,14 +70,16 @@ func createUpstream(up dataplane.Upstream) http.Upstream { } return http.Upstream{ - Name: up.Name, - Servers: upstreamServers, + Name: up.Name, + ZoneSize: zoneSize, + Servers: upstreamServers, } } func createInvalidBackendRefUpstream() http.Upstream { return http.Upstream{ - Name: invalidBackendRef, + Name: invalidBackendRef, + ZoneSize: invalidBackendZoneSize, Servers: []http.UpstreamServer{ { Address: nginx500Server, diff --git a/internal/mode/static/nginx/config/upstreams_template.go b/internal/mode/static/nginx/config/upstreams_template.go index bf28a6e2e7..4bec3b9e11 100644 --- a/internal/mode/static/nginx/config/upstreams_template.go +++ b/internal/mode/static/nginx/config/upstreams_template.go @@ -1,13 
+1,14 @@ package config // FIXME(kate-osborn): Dynamically calculate upstream zone size based on the number of upstreams. -// 512k will support up to 648 upstream servers. +// 512k will support up to 648 upstream servers for OSS. +// NGINX Plus needs 1m to support roughly the same amount of servers. // https://github.com/nginxinc/nginx-gateway-fabric/issues/483 var upstreamsTemplateText = ` {{ range $u := . }} upstream {{ $u.Name }} { random two least_conn; - zone {{ $u.Name }} 512k; + zone {{ $u.Name }} {{ $u.ZoneSize }}; {{ range $server := $u.Servers }} server {{ $server.Address }}; {{- end }} diff --git a/internal/mode/static/nginx/config/upstreams_test.go b/internal/mode/static/nginx/config/upstreams_test.go index 6ec0bbfd48..851dd776b1 100644 --- a/internal/mode/static/nginx/config/upstreams_test.go +++ b/internal/mode/static/nginx/config/upstreams_test.go @@ -11,6 +11,7 @@ import ( ) func TestExecuteUpstreams(t *testing.T) { + gen := GeneratorImpl{} stateUpstreams := []dataplane.Upstream{ { Name: "up1", @@ -46,7 +47,7 @@ func TestExecuteUpstreams(t *testing.T) { "server unix:/var/lib/nginx/nginx-502-server.sock;", } - upstreams := string(executeUpstreams(dataplane.Configuration{Upstreams: stateUpstreams})) + upstreams := string(gen.executeUpstreams(dataplane.Configuration{Upstreams: stateUpstreams})) g := NewWithT(t) for _, expSubString := range expectedSubStrings { g.Expect(upstreams).To(ContainSubstring(expSubString)) @@ -54,6 +55,7 @@ func TestExecuteUpstreams(t *testing.T) { } func TestCreateUpstreams(t *testing.T) { + gen := GeneratorImpl{} stateUpstreams := []dataplane.Upstream{ { Name: "up1", @@ -89,7 +91,8 @@ func TestCreateUpstreams(t *testing.T) { expUpstreams := []http.Upstream{ { - Name: "up1", + Name: "up1", + ZoneSize: ossZoneSize, Servers: []http.UpstreamServer{ { Address: "10.0.0.0:80", @@ -103,7 +106,8 @@ func TestCreateUpstreams(t *testing.T) { }, }, { - Name: "up2", + Name: "up2", + ZoneSize: ossZoneSize, Servers: []http.UpstreamServer{ { 
Address: "11.0.0.0:80", @@ -111,7 +115,8 @@ func TestCreateUpstreams(t *testing.T) { }, }, { - Name: "up3", + Name: "up3", + ZoneSize: ossZoneSize, Servers: []http.UpstreamServer{ { Address: nginx502Server, @@ -119,7 +124,8 @@ func TestCreateUpstreams(t *testing.T) { }, }, { - Name: invalidBackendRef, + Name: invalidBackendRef, + ZoneSize: invalidBackendZoneSize, Servers: []http.UpstreamServer{ { Address: nginx500Server, @@ -129,11 +135,12 @@ func TestCreateUpstreams(t *testing.T) { } g := NewWithT(t) - result := createUpstreams(stateUpstreams) + result := gen.createUpstreams(stateUpstreams) g.Expect(result).To(Equal(expUpstreams)) } func TestCreateUpstream(t *testing.T) { + gen := GeneratorImpl{} tests := []struct { msg string stateUpstream dataplane.Upstream @@ -145,7 +152,8 @@ func TestCreateUpstream(t *testing.T) { Endpoints: nil, }, expectedUpstream: http.Upstream{ - Name: "nil-endpoints", + Name: "nil-endpoints", + ZoneSize: ossZoneSize, Servers: []http.UpstreamServer{ { Address: nginx502Server, @@ -160,7 +168,8 @@ func TestCreateUpstream(t *testing.T) { Endpoints: []resolver.Endpoint{}, }, expectedUpstream: http.Upstream{ - Name: "no-endpoints", + Name: "no-endpoints", + ZoneSize: ossZoneSize, Servers: []http.UpstreamServer{ { Address: nginx502Server, @@ -188,7 +197,8 @@ func TestCreateUpstream(t *testing.T) { }, }, expectedUpstream: http.Upstream{ - Name: "multiple-endpoints", + Name: "multiple-endpoints", + ZoneSize: ossZoneSize, Servers: []http.UpstreamServer{ { Address: "10.0.0.1:80", @@ -208,8 +218,36 @@ func TestCreateUpstream(t *testing.T) { for _, test := range tests { t.Run(test.msg, func(t *testing.T) { g := NewWithT(t) - result := createUpstream(test.stateUpstream) + result := gen.createUpstream(test.stateUpstream) g.Expect(result).To(Equal(test.expectedUpstream)) }) } } + +func TestCreateUpstreamPlus(t *testing.T) { + gen := GeneratorImpl{plus: true} + + stateUpstream := dataplane.Upstream{ + Name: "multiple-endpoints", + Endpoints: 
[]resolver.Endpoint{ + { + Address: "10.0.0.1", + Port: 80, + }, + }, + } + expectedUpstream := http.Upstream{ + Name: "multiple-endpoints", + ZoneSize: plusZoneSize, + Servers: []http.UpstreamServer{ + { + Address: "10.0.0.1:80", + }, + }, + } + + result := gen.createUpstream(stateUpstream) + + g := NewWithT(t) + g.Expect(result).To(Equal(expectedUpstream)) +} diff --git a/internal/mode/static/nginx/runtime/clients.go b/internal/mode/static/nginx/runtime/clients.go new file mode 100644 index 0000000000..a01a8ef09f --- /dev/null +++ b/internal/mode/static/nginx/runtime/clients.go @@ -0,0 +1,39 @@ +package runtime + +import ( + "context" + "fmt" + "net" + "net/http" + + "github.com/nginxinc/nginx-plus-go-client/client" +) + +const ( + nginxPlusAPISock = "/var/run/nginx/nginx-plus-api.sock" + nginxPlusAPIURI = "http://nginx-plus-api/api" +) + +// CreatePlusClient returns a client for communicating with the NGINX Plus API. +func CreatePlusClient() (*client.NginxClient, error) { + var plusClient *client.NginxClient + var err error + + httpClient := GetSocketClient(nginxPlusAPISock) + plusClient, err = client.NewNginxClient(nginxPlusAPIURI, client.WithHTTPClient(&httpClient)) + if err != nil { + return nil, fmt.Errorf("failed to create NginxClient for Plus: %w", err) + } + return plusClient, nil +} + +// GetSocketClient gets an http.Client with a unix socket transport. 
+func GetSocketClient(sockPath string) http.Client { + return http.Client{ + Transport: &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", sockPath) + }, + }, + } +} diff --git a/internal/mode/static/nginx/runtime/manager.go b/internal/mode/static/nginx/runtime/manager.go index 48e0e0defa..ee20146023 100644 --- a/internal/mode/static/nginx/runtime/manager.go +++ b/internal/mode/static/nginx/runtime/manager.go @@ -11,6 +11,8 @@ import ( "syscall" "time" + "github.com/go-logr/logr" + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" "k8s.io/apimachinery/pkg/util/wait" ) @@ -33,6 +35,14 @@ var childProcPathFmt = "/proc/%[1]v/task/%[1]v/children" type Manager interface { // Reload reloads NGINX configuration. It is a blocking operation. Reload(ctx context.Context, configVersion int) error + // IsPlus returns whether or not we are running NGINX plus. + IsPlus() bool + // UpdateHTTPServers uses the NGINX Plus API to update HTTP servers. + // Only usable if running NGINX Plus. + UpdateHTTPServers(string, []ngxclient.UpstreamServer) error + // GetUpstreams uses the NGINX Plus API to get the upstreams. + // Only usable if running NGINX Plus. + GetUpstreams() (ngxclient.Upstreams, error) } // MetricsCollector is an interface for the metrics of the NGINX runtime manager. @@ -46,16 +56,29 @@ type MetricsCollector interface { type ManagerImpl struct { verifyClient *verifyClient metricsCollector MetricsCollector + ngxPlusClient *ngxclient.NginxClient + logger logr.Logger } // NewManagerImpl creates a new ManagerImpl. 
-func NewManagerImpl(collector MetricsCollector) *ManagerImpl { +func NewManagerImpl( + ngxPlusClient *ngxclient.NginxClient, + collector MetricsCollector, + logger logr.Logger, +) *ManagerImpl { return &ManagerImpl{ verifyClient: newVerifyClient(nginxReloadTimeout), metricsCollector: collector, + ngxPlusClient: ngxPlusClient, + logger: logger, } } +// IsPlus returns whether or not we are running NGINX plus. +func (m *ManagerImpl) IsPlus() bool { + return m.ngxPlusClient != nil +} + func (m *ManagerImpl) Reload(ctx context.Context, configVersion int) error { start := time.Now() // We find the main NGINX PID on every reload because it will change if the NGINX container is restarted. @@ -94,6 +117,40 @@ func (m *ManagerImpl) Reload(ctx context.Context, configVersion int) error { return nil } +// UpdateHTTPServers uses the NGINX Plus API to update HTTP upstream servers. +// Only usable if running NGINX Plus. +func (m *ManagerImpl) UpdateHTTPServers(upstream string, servers []ngxclient.UpstreamServer) error { + if !m.IsPlus() { + panic("cannot update HTTP upstream servers: NGINX Plus not enabled") + } + + added, deleted, updated, err := m.ngxPlusClient.UpdateHTTPServers(upstream, servers) + m.logger.V(1).Info("Added upstream servers", "count", len(added)) + m.logger.V(1).Info("Deleted upstream servers", "count", len(deleted)) + m.logger.V(1).Info("Updated upstream servers", "count", len(updated)) + + return err +} + +// GetUpstreams uses the NGINX Plus API to get the upstreams. +// Only usable if running NGINX Plus. +func (m *ManagerImpl) GetUpstreams() (ngxclient.Upstreams, error) { + if !m.IsPlus() { + panic("cannot get HTTP upstream servers: NGINX Plus not enabled") + } + + upstreams, err := m.ngxPlusClient.GetUpstreams() + if err != nil { + return nil, err + } + + if upstreams == nil { + return nil, errors.New("GET upstreams returned nil value") + } + + return *upstreams, nil +} + // EnsureNginxRunning ensures NGINX is running by locating the main process. 
func EnsureNginxRunning(ctx context.Context) error { if _, err := findMainProcess(ctx, os.Stat, os.ReadFile, pidFileTimeout); err != nil { diff --git a/internal/mode/static/nginx/runtime/manager_test.go b/internal/mode/static/nginx/runtime/manager_test.go index 3ce479a21a..323bb514a5 100644 --- a/internal/mode/static/nginx/runtime/manager_test.go +++ b/internal/mode/static/nginx/runtime/manager_test.go @@ -7,9 +7,22 @@ import ( "testing" "time" + "github.com/go-logr/logr" + ngxclient "github.com/nginxinc/nginx-plus-go-client/client" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) +var _ = Describe("NGINX Runtime Manager", func() { + It("returns whether or not we're using NGINX Plus", func() { + mgr := NewManagerImpl(nil, nil, logr.New(GinkgoLogr.GetSink())) + Expect(mgr.IsPlus()).To(BeFalse()) + + mgr = NewManagerImpl(&ngxclient.NginxClient{}, nil, logr.New(GinkgoLogr.GetSink())) + Expect(mgr.IsPlus()).To(BeTrue()) + }) +}) + func TestFindMainProcess(t *testing.T) { readFileFuncGen := func(content []byte) readFileFunc { return func(name string) ([]byte, error) { diff --git a/internal/mode/static/nginx/runtime/runtime_suite_test.go b/internal/mode/static/nginx/runtime/runtime_suite_test.go new file mode 100644 index 0000000000..7a31e6ebf4 --- /dev/null +++ b/internal/mode/static/nginx/runtime/runtime_suite_test.go @@ -0,0 +1,13 @@ +package runtime_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestRuntime(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Runtime Suite") +} diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go index f841eaf099..2538e32de3 100644 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go +++ b/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go @@ -6,9 +6,32 @@ import ( "sync" "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/nginx/runtime" + "github.com/nginxinc/nginx-plus-go-client/client" ) type FakeManager struct { + GetUpstreamsStub func() (client.Upstreams, error) + getUpstreamsMutex sync.RWMutex + getUpstreamsArgsForCall []struct { + } + getUpstreamsReturns struct { + result1 client.Upstreams + result2 error + } + getUpstreamsReturnsOnCall map[int]struct { + result1 client.Upstreams + result2 error + } + IsPlusStub func() bool + isPlusMutex sync.RWMutex + isPlusArgsForCall []struct { + } + isPlusReturns struct { + result1 bool + } + isPlusReturnsOnCall map[int]struct { + result1 bool + } ReloadStub func(context.Context, int) error reloadMutex sync.RWMutex reloadArgsForCall []struct { @@ -21,10 +44,131 @@ type FakeManager struct { reloadReturnsOnCall map[int]struct { result1 error } + UpdateHTTPServersStub func(string, []client.UpstreamServer) error + updateHTTPServersMutex sync.RWMutex + updateHTTPServersArgsForCall []struct { + arg1 string + arg2 []client.UpstreamServer + } + updateHTTPServersReturns struct { + result1 error + } + updateHTTPServersReturnsOnCall map[int]struct { + result1 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } +func (fake *FakeManager) GetUpstreams() (client.Upstreams, error) { + fake.getUpstreamsMutex.Lock() + ret, specificReturn := fake.getUpstreamsReturnsOnCall[len(fake.getUpstreamsArgsForCall)] + fake.getUpstreamsArgsForCall = append(fake.getUpstreamsArgsForCall, struct { + }{}) + stub 
:= fake.GetUpstreamsStub + fakeReturns := fake.getUpstreamsReturns + fake.recordInvocation("GetUpstreams", []interface{}{}) + fake.getUpstreamsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeManager) GetUpstreamsCallCount() int { + fake.getUpstreamsMutex.RLock() + defer fake.getUpstreamsMutex.RUnlock() + return len(fake.getUpstreamsArgsForCall) +} + +func (fake *FakeManager) GetUpstreamsCalls(stub func() (client.Upstreams, error)) { + fake.getUpstreamsMutex.Lock() + defer fake.getUpstreamsMutex.Unlock() + fake.GetUpstreamsStub = stub +} + +func (fake *FakeManager) GetUpstreamsReturns(result1 client.Upstreams, result2 error) { + fake.getUpstreamsMutex.Lock() + defer fake.getUpstreamsMutex.Unlock() + fake.GetUpstreamsStub = nil + fake.getUpstreamsReturns = struct { + result1 client.Upstreams + result2 error + }{result1, result2} +} + +func (fake *FakeManager) GetUpstreamsReturnsOnCall(i int, result1 client.Upstreams, result2 error) { + fake.getUpstreamsMutex.Lock() + defer fake.getUpstreamsMutex.Unlock() + fake.GetUpstreamsStub = nil + if fake.getUpstreamsReturnsOnCall == nil { + fake.getUpstreamsReturnsOnCall = make(map[int]struct { + result1 client.Upstreams + result2 error + }) + } + fake.getUpstreamsReturnsOnCall[i] = struct { + result1 client.Upstreams + result2 error + }{result1, result2} +} + +func (fake *FakeManager) IsPlus() bool { + fake.isPlusMutex.Lock() + ret, specificReturn := fake.isPlusReturnsOnCall[len(fake.isPlusArgsForCall)] + fake.isPlusArgsForCall = append(fake.isPlusArgsForCall, struct { + }{}) + stub := fake.IsPlusStub + fakeReturns := fake.isPlusReturns + fake.recordInvocation("IsPlus", []interface{}{}) + fake.isPlusMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeManager) IsPlusCallCount() int { + 
fake.isPlusMutex.RLock() + defer fake.isPlusMutex.RUnlock() + return len(fake.isPlusArgsForCall) +} + +func (fake *FakeManager) IsPlusCalls(stub func() bool) { + fake.isPlusMutex.Lock() + defer fake.isPlusMutex.Unlock() + fake.IsPlusStub = stub +} + +func (fake *FakeManager) IsPlusReturns(result1 bool) { + fake.isPlusMutex.Lock() + defer fake.isPlusMutex.Unlock() + fake.IsPlusStub = nil + fake.isPlusReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeManager) IsPlusReturnsOnCall(i int, result1 bool) { + fake.isPlusMutex.Lock() + defer fake.isPlusMutex.Unlock() + fake.IsPlusStub = nil + if fake.isPlusReturnsOnCall == nil { + fake.isPlusReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isPlusReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + func (fake *FakeManager) Reload(arg1 context.Context, arg2 int) error { fake.reloadMutex.Lock() ret, specificReturn := fake.reloadReturnsOnCall[len(fake.reloadArgsForCall)] @@ -87,11 +231,84 @@ func (fake *FakeManager) ReloadReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeManager) UpdateHTTPServers(arg1 string, arg2 []client.UpstreamServer) error { + var arg2Copy []client.UpstreamServer + if arg2 != nil { + arg2Copy = make([]client.UpstreamServer, len(arg2)) + copy(arg2Copy, arg2) + } + fake.updateHTTPServersMutex.Lock() + ret, specificReturn := fake.updateHTTPServersReturnsOnCall[len(fake.updateHTTPServersArgsForCall)] + fake.updateHTTPServersArgsForCall = append(fake.updateHTTPServersArgsForCall, struct { + arg1 string + arg2 []client.UpstreamServer + }{arg1, arg2Copy}) + stub := fake.UpdateHTTPServersStub + fakeReturns := fake.updateHTTPServersReturns + fake.recordInvocation("UpdateHTTPServers", []interface{}{arg1, arg2Copy}) + fake.updateHTTPServersMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeManager) UpdateHTTPServersCallCount() int { + 
fake.updateHTTPServersMutex.RLock() + defer fake.updateHTTPServersMutex.RUnlock() + return len(fake.updateHTTPServersArgsForCall) +} + +func (fake *FakeManager) UpdateHTTPServersCalls(stub func(string, []client.UpstreamServer) error) { + fake.updateHTTPServersMutex.Lock() + defer fake.updateHTTPServersMutex.Unlock() + fake.UpdateHTTPServersStub = stub +} + +func (fake *FakeManager) UpdateHTTPServersArgsForCall(i int) (string, []client.UpstreamServer) { + fake.updateHTTPServersMutex.RLock() + defer fake.updateHTTPServersMutex.RUnlock() + argsForCall := fake.updateHTTPServersArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeManager) UpdateHTTPServersReturns(result1 error) { + fake.updateHTTPServersMutex.Lock() + defer fake.updateHTTPServersMutex.Unlock() + fake.UpdateHTTPServersStub = nil + fake.updateHTTPServersReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeManager) UpdateHTTPServersReturnsOnCall(i int, result1 error) { + fake.updateHTTPServersMutex.Lock() + defer fake.updateHTTPServersMutex.Unlock() + fake.UpdateHTTPServersStub = nil + if fake.updateHTTPServersReturnsOnCall == nil { + fake.updateHTTPServersReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateHTTPServersReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeManager) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() + fake.getUpstreamsMutex.RLock() + defer fake.getUpstreamsMutex.RUnlock() + fake.isPlusMutex.RLock() + defer fake.isPlusMutex.RUnlock() fake.reloadMutex.RLock() defer fake.reloadMutex.RUnlock() + fake.updateHTTPServersMutex.RLock() + defer fake.updateHTTPServersMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/mode/static/state/change_processor.go b/internal/mode/static/state/change_processor.go index 
ff0034636d..93229e0d45 100644 --- a/internal/mode/static/state/change_processor.go +++ b/internal/mode/static/state/change_processor.go @@ -16,9 +16,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" v1 "sigs.k8s.io/gateway-api/apis/v1" - "sigs.k8s.io/gateway-api/apis/v1beta1" - gwapivalidation "sigs.k8s.io/gateway-api/apis/v1/validation" + "sigs.k8s.io/gateway-api/apis/v1beta1" "github.com/nginxinc/nginx-gateway-fabric/internal/framework/gatewayclass" "github.com/nginxinc/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -32,6 +31,19 @@ const ( "webhook is running correctly." ) +// ChangeType is the type of change that occurred based on a k8s object event. +type ChangeType int + +const ( + // NoChange means that nothing changed. + NoChange ChangeType = iota + // EndpointsOnlyChange means that only the endpoints changed. + // If using NGINX Plus, this update can be done using the API without a reload. + EndpointsOnlyChange + // ClusterStateChange means that something other than endpoints changed. This requires an NGINX reload. + ClusterStateChange +) + //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . ChangeProcessor type extractGVKFunc func(obj client.Object) schema.GroupVersionKind @@ -48,8 +60,8 @@ type ChangeProcessor interface { // this ChangeProcessor was created for. CaptureDeleteChange(resourceType client.Object, nsname types.NamespacedName) // Process produces a graph-like representation of GatewayAPI resources. - // If no changes were captured, the changed return argument will be false and graph will be empty. - Process() (changed bool, graphCfg *graph.Graph) + // If no changes were captured, the changed return argument will be NoChange and graph will be empty. + Process() (changeType ChangeType, graphCfg *graph.Graph) } // ChangeProcessorConfig holds configuration parameters for ChangeProcessorImpl. 
@@ -78,8 +90,8 @@ type ChangeProcessorImpl struct { clusterState graph.ClusterState // updater acts upon the cluster state. updater Updater - // getAndResetClusterStateChanged tells if the cluster state has changed. - getAndResetClusterStateChanged func() bool + // getAndResetClusterStateChanged tells if and how the cluster state has changed. + getAndResetClusterStateChanged func() ChangeType cfg ChangeProcessorConfig lock sync.Mutex @@ -225,12 +237,13 @@ func (c *ChangeProcessorImpl) CaptureDeleteChange(resourceType client.Object, ns c.updater.Delete(resourceType, nsname) } -func (c *ChangeProcessorImpl) Process() (bool, *graph.Graph) { +func (c *ChangeProcessorImpl) Process() (ChangeType, *graph.Graph) { c.lock.Lock() defer c.lock.Unlock() - if !c.getAndResetClusterStateChanged() { - return false, nil + changeType := c.getAndResetClusterStateChanged() + if changeType == NoChange { + return NoChange, nil } c.latestGraph = graph.BuildGraph( @@ -241,5 +254,5 @@ func (c *ChangeProcessorImpl) Process() (bool, *graph.Graph) { c.cfg.ProtectedPorts, ) - return true, c.latestGraph + return changeType, c.latestGraph } diff --git a/internal/mode/static/state/change_processor_test.go b/internal/mode/static/state/change_processor_test.go index 0376b43463..8c009c20d4 100644 --- a/internal/mode/static/state/change_processor_test.go +++ b/internal/mode/static/state/change_processor_test.go @@ -518,7 +518,7 @@ var _ = Describe("ChangeProcessor", func() { When("no upsert has occurred", func() { It("returns nil graph", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) }) }) @@ -528,7 +528,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(gatewayAPICRD) changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(&graph.Graph{}, graphCfg)).To(BeEmpty()) }) }) @@ 
-538,7 +538,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(hr1) changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(&graph.Graph{}, graphCfg)).To(BeEmpty()) }) }) @@ -547,7 +547,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(diffNsTLSSecret) changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) }) }) @@ -582,7 +582,7 @@ var _ = Describe("ChangeProcessor", func() { expRouteHR1.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -635,7 +635,7 @@ var _ = Describe("ChangeProcessor", func() { expRouteHR1.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -657,7 +657,7 @@ var _ = Describe("ChangeProcessor", func() { expRouteHR1.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -670,7 +670,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -687,7 +687,7 @@ var _ = Describe("ChangeProcessor", func() { ) changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, 
graphCfg)).To(BeEmpty()) }) }) @@ -698,7 +698,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(gatewayAPICRDSameVersion) changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) }) }) @@ -712,7 +712,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -730,7 +730,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }, ) @@ -745,7 +745,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -759,7 +759,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -772,7 +772,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -780,7 +780,7 @@ var _ = Describe("ChangeProcessor", func() { It("returns nil graph", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) }) }) @@ -790,7 +790,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + 
Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) }) }) @@ -806,7 +806,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -831,7 +831,7 @@ var _ = Describe("ChangeProcessor", func() { } changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -866,7 +866,7 @@ var _ = Describe("ChangeProcessor", func() { expGraph.ReferencedServices = nil changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -898,7 +898,7 @@ var _ = Describe("ChangeProcessor", func() { expGraph.ReferencedServices = nil changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -921,7 +921,7 @@ var _ = Describe("ChangeProcessor", func() { expGraph.ReferencedServices = nil changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) }) }) @@ -936,7 +936,7 @@ var _ = Describe("ChangeProcessor", func() { expGraph.ReferencedServices = nil changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(&graph.Graph{}, graphCfg)).To(BeEmpty()) }) }) @@ -951,7 +951,7 @@ var _ = Describe("ChangeProcessor", func() { expGraph.ReferencedServices = nil changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(helpers.Diff(&graph.Graph{}, 
graphCfg)).To(BeEmpty()) }) }) @@ -1033,52 +1033,52 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) - testProcessChangedVal := func(expChanged bool) { + testProcessChangedVal := func(expChanged state.ChangeType) { changed, _ := processor.Process() Expect(changed).To(Equal(expChanged)) } - testUpsertTriggersChange := func(obj client.Object, expChanged bool) { + testUpsertTriggersChange := func(obj client.Object, expChanged state.ChangeType) { processor.CaptureUpsertChange(obj) testProcessChangedVal(expChanged) } - testDeleteTriggersChange := func(obj client.Object, nsname types.NamespacedName, expChanged bool) { + testDeleteTriggersChange := func(obj client.Object, nsname types.NamespacedName, expChanged state.ChangeType) { processor.CaptureDeleteChange(obj, nsname) testProcessChangedVal(expChanged) } When("hr1 is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1, true) + testUpsertTriggersChange(hr1, state.ClusterStateChange) }) }) When("a hr1 service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1svc, true) + testUpsertTriggersChange(hr1svc, state.ClusterStateChange) }) }) When("an hr1 endpoint slice is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice1, true) + testUpsertTriggersChange(hr1slice1, state.EndpointsOnlyChange) }) }) When("an hr1 service is updated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1svc, true) + testUpsertTriggersChange(hr1svc, state.ClusterStateChange) }) }) When("another hr1 endpoint slice is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice2, true) + testUpsertTriggersChange(hr1slice2, state.EndpointsOnlyChange) }) }) When("an endpoint slice with a missing 
svc name label is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(missingSvcNameSlice, false) + testUpsertTriggersChange(missingSvcNameSlice, state.NoChange) }) }) When("an hr1 endpoint slice is deleted", func() { @@ -1086,7 +1086,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice1, types.NamespacedName{Namespace: hr1slice1.Namespace, Name: hr1slice1.Name}, - true, + state.EndpointsOnlyChange, ) }) }) @@ -1095,13 +1095,13 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice2, types.NamespacedName{Namespace: hr1slice2.Namespace, Name: hr1slice2.Name}, - true, + state.EndpointsOnlyChange, ) }) }) When("the second hr1 endpoint slice is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice2, true) + testUpsertTriggersChange(hr1slice2, state.EndpointsOnlyChange) }) }) When("hr1 is deleted", func() { @@ -1109,7 +1109,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1, types.NamespacedName{Namespace: hr1.Namespace, Name: hr1.Name}, - true, + state.ClusterStateChange, ) }) }) @@ -1118,7 +1118,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1svc, types.NamespacedName{Namespace: hr1svc.Namespace, Name: hr1svc.Name}, - false, + state.NoChange, ) }) }) @@ -1127,23 +1127,23 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice2, types.NamespacedName{Namespace: hr1slice2.Namespace, Name: hr1slice2.Name}, - false, + state.NoChange, ) }) }) When("hr2 is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr2, true) + testUpsertTriggersChange(hr2, state.ClusterStateChange) }) }) When("a hr3, that shares a backend service with hr2, is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr3, true) + testUpsertTriggersChange(hr3, state.ClusterStateChange) }) }) When("sharedSvc, a service 
referenced by both hr2 and hr3, is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(sharedSvc, true) + testUpsertTriggersChange(sharedSvc, state.ClusterStateChange) }) }) When("hr2 is deleted", func() { @@ -1151,7 +1151,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr2, types.NamespacedName{Namespace: hr2.Namespace, Name: hr2.Name}, - true, + state.ClusterStateChange, ) }) }) @@ -1160,13 +1160,13 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( sharedSvc, types.NamespacedName{Namespace: sharedSvc.Namespace, Name: sharedSvc.Name}, - true, + state.ClusterStateChange, ) }) }) When("sharedSvc is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(sharedSvc, true) + testUpsertTriggersChange(sharedSvc, state.ClusterStateChange) }) }) When("hr3 is deleted", func() { @@ -1174,7 +1174,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr3, types.NamespacedName{Namespace: hr3.Namespace, Name: hr3.Name}, - true, + state.ClusterStateChange, ) }) }) @@ -1183,28 +1183,28 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( sharedSvc, types.NamespacedName{Namespace: sharedSvc.Namespace, Name: sharedSvc.Name}, - false, + state.NoChange, ) }) }) When("a service that is not referenced by any route is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(notRefSvc, false) + testUpsertTriggersChange(notRefSvc, state.NoChange) }) }) When("a route with an invalid backend ref type is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hrInvalidBackendRef, true) + testUpsertTriggersChange(hrInvalidBackendRef, state.ClusterStateChange) }) }) When("a service with a namespace name that matches invalid backend ref is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(invalidSvc, false) + testUpsertTriggersChange(invalidSvc, 
state.NoChange) }) }) When("an endpoint slice that is not owned by a referenced service is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(noRefSlice, false) + testUpsertTriggersChange(noRefSlice, state.NoChange) }) }) When("an endpoint slice that is not owned by a referenced service is deleted", func() { @@ -1212,24 +1212,24 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( noRefSlice, types.NamespacedName{Namespace: noRefSlice.Namespace, Name: noRefSlice.Name}, - false, + state.NoChange, ) }) }) Context("processing a route with multiple rules and three unique backend services", func() { When("route is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hrMultipleRules, true) + testUpsertTriggersChange(hrMultipleRules, state.ClusterStateChange) }) }) When("first referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc1, true) + testUpsertTriggersChange(bazSvc1, state.ClusterStateChange) }) }) When("second referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc2, true) + testUpsertTriggersChange(bazSvc2, state.ClusterStateChange) }) }) When("first referenced service is deleted", func() { @@ -1237,23 +1237,23 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( bazSvc1, types.NamespacedName{Namespace: bazSvc1.Namespace, Name: bazSvc1.Name}, - true, + state.ClusterStateChange, ) }) }) When("first referenced service is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc1, true) + testUpsertTriggersChange(bazSvc1, state.ClusterStateChange) }) }) When("third referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc3, true) + testUpsertTriggersChange(bazSvc3, state.ClusterStateChange) }) }) When("third referenced service is updated", func() { It("should 
trigger a change", func() { - testUpsertTriggersChange(bazSvc3, true) + testUpsertTriggersChange(bazSvc3, state.ClusterStateChange) }) }) When("route is deleted", func() { @@ -1264,7 +1264,7 @@ var _ = Describe("ChangeProcessor", func() { Namespace: hrMultipleRules.Namespace, Name: hrMultipleRules.Name, }, - true, + state.ClusterStateChange, ) }) }) @@ -1273,7 +1273,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( bazSvc1, types.NamespacedName{Namespace: bazSvc1.Namespace, Name: bazSvc1.Name}, - false, + state.NoChange, ) }) }) @@ -1282,7 +1282,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( bazSvc2, types.NamespacedName{Namespace: bazSvc2.Namespace, Name: bazSvc2.Name}, - false, + state.NoChange, ) }) }) @@ -1291,7 +1291,7 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( bazSvc3, types.NamespacedName{Namespace: bazSvc3.Namespace, Name: bazSvc3.Name}, - false, + state.NoChange, ) }) }) @@ -1366,42 +1366,42 @@ var _ = Describe("ChangeProcessor", func() { It("does not trigger an update", func() { processor.CaptureUpsertChange(nsNoLabels) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) }) }) When("a namespace is created that is linked to a listener", func() { It("triggers an update", func() { processor.CaptureUpsertChange(ns) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) When("a namespace is deleted that is not linked to a listener", func() { It("does not trigger an update", func() { processor.CaptureDeleteChange(nsNoLabels, types.NamespacedName{Name: "no-labels"}) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) }) }) When("a namespace is deleted that is linked to a listener", func() { It("triggers an update", func() { processor.CaptureDeleteChange(ns, types.NamespacedName{Name: "ns"}) 
changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) When("a namespace that is not linked to a listener has its labels changed to match a listener", func() { It("triggers an update", func() { processor.CaptureUpsertChange(nsDifferentLabels) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) nsDifferentLabels.Labels = map[string]string{ "app": "allowed", } processor.CaptureUpsertChange(nsDifferentLabels) changed, _ = processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) When("a namespace that is linked to a listener has its labels changed to no longer match a listener", func() { @@ -1411,7 +1411,7 @@ var _ = Describe("ChangeProcessor", func() { } processor.CaptureUpsertChange(nsDifferentLabels) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) When("a gateway changes its listener's labels", func() { @@ -1423,7 +1423,7 @@ var _ = Describe("ChangeProcessor", func() { gwChangedLabel.Generation++ processor.CaptureUpsertChange(gwChangedLabel) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) // After changing the gateway's labels and generation, the processor should be marked to update // the nginx configuration and build a new graph. When processor.Process() gets called, @@ -1434,11 +1434,11 @@ var _ = Describe("ChangeProcessor", func() { // gateway. 
processor.CaptureUpsertChange(ns) changed, _ = processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) processor.CaptureUpsertChange(nsDifferentLabels) changed, _ = processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) When("a namespace that is not linked to a listener has its labels removed", func() { @@ -1446,7 +1446,7 @@ var _ = Describe("ChangeProcessor", func() { ns.Labels = nil processor.CaptureUpsertChange(ns) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) }) }) When("a namespace that is linked to a listener has its labels removed", func() { @@ -1454,7 +1454,7 @@ var _ = Describe("ChangeProcessor", func() { nsDifferentLabels.Labels = nil processor.CaptureUpsertChange(nsDifferentLabels) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) }) @@ -1734,7 +1734,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(rg1) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) When("a upsert of updated resources is followed by an upsert of the same generation", func() { It("should report changed", func() { @@ -1751,7 +1751,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(rg1Updated) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) It("should report changed after upserting new resources", func() { @@ -1761,7 +1761,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(rg2) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) When("resources are deleted followed by upserts with the same generations", func() { It("should report changed", func() { @@ 
-1777,14 +1777,14 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(rg2) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) It("should report changed after deleting resources", func() { processor.CaptureDeleteChange(&v1.HTTPRoute{}, hr2NsName) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) Describe("Deleting non-existing Gateway API resource", func() { @@ -1796,7 +1796,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&v1beta1.ReferenceGrant{}, rgNsName) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) }) }) Describe("Multiple Kubernetes API resource changes", Ordered, func() { @@ -1809,7 +1809,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(secret) processor.CaptureUpsertChange(barSecret) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) It("should report changed after multiple Upserts of related resources", func() { @@ -1818,7 +1818,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(ns) processor.CaptureUpsertChange(secretUpdated) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) It("should report not changed after multiple Upserts of unrelated resources", func() { processor.CaptureUpsertChange(unrelatedSvc) @@ -1827,7 +1827,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) }) When("upserts of related resources are followed by upserts of unrelated resources", func() { It("should report changed", func() { @@ -1844,7 +1844,7 @@ var _ = 
Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) When("deletes of related resources are followed by upserts of unrelated resources", func() { @@ -1862,7 +1862,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) }) }) @@ -1882,7 +1882,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(secret) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }) It("should report not changed after multiple Upserts of unrelated resources", func() { // unrelated Kubernetes API resources @@ -1892,7 +1892,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) changed, _ := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) }) It("should report changed after upserting changed resources followed by upserting unrelated resources", func() { @@ -1909,7 +1909,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) changed, _ := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) }, ) }) @@ -2029,7 +2029,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(gc) changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(graphCfg.GatewayClass).ToNot(BeNil()) Expect(fakeEventRecorder.Events).To(HaveLen(0)) }) @@ -2041,7 +2041,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) 
Expect(graphCfg).To(BeNil()) Expect(fakeEventRecorder.Events).To(HaveLen(2)) @@ -2057,7 +2057,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(graphCfg).ToNot(BeNil()) Expect(graphCfg.Gateway).ToNot(BeNil()) Expect(graphCfg.Routes).To(HaveLen(1)) @@ -2072,7 +2072,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(graphCfg.Routes).To(HaveLen(0)) Expect(fakeEventRecorder.Events).To(HaveLen(1)) @@ -2086,7 +2086,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeTrue()) + Expect(changed).To(Equal(state.ClusterStateChange)) Expect(graphCfg.Gateway).To(BeNil()) Expect(fakeEventRecorder.Events).To(HaveLen(1)) @@ -2143,7 +2143,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) assertRejectedEvent() @@ -2187,7 +2187,7 @@ var _ = Describe("ChangeProcessor", func() { changed, graphCfg := processor.Process() - Expect(changed).To(BeFalse()) + Expect(changed).To(Equal(state.NoChange)) Expect(graphCfg).To(BeNil()) assertRejectedEvent() diff --git a/internal/mode/static/state/statefakes/fake_change_processor.go b/internal/mode/static/state/statefakes/fake_change_processor.go index 69a69af7b9..e9a37bbd68 100644 --- a/internal/mode/static/state/statefakes/fake_change_processor.go +++ b/internal/mode/static/state/statefakes/fake_change_processor.go @@ -22,16 +22,16 @@ type FakeChangeProcessor struct { captureUpsertChangeArgsForCall []struct { arg1 client.Object } - ProcessStub func() (bool, *graph.Graph) + ProcessStub func() (state.ChangeType, *graph.Graph) processMutex sync.RWMutex processArgsForCall []struct { } 
processReturns struct { - result1 bool + result1 state.ChangeType result2 *graph.Graph } processReturnsOnCall map[int]struct { - result1 bool + result1 state.ChangeType result2 *graph.Graph } invocations map[string][][]interface{} @@ -103,7 +103,7 @@ func (fake *FakeChangeProcessor) CaptureUpsertChangeArgsForCall(i int) client.Ob return argsForCall.arg1 } -func (fake *FakeChangeProcessor) Process() (bool, *graph.Graph) { +func (fake *FakeChangeProcessor) Process() (state.ChangeType, *graph.Graph) { fake.processMutex.Lock() ret, specificReturn := fake.processReturnsOnCall[len(fake.processArgsForCall)] fake.processArgsForCall = append(fake.processArgsForCall, struct { @@ -127,34 +127,34 @@ func (fake *FakeChangeProcessor) ProcessCallCount() int { return len(fake.processArgsForCall) } -func (fake *FakeChangeProcessor) ProcessCalls(stub func() (bool, *graph.Graph)) { +func (fake *FakeChangeProcessor) ProcessCalls(stub func() (state.ChangeType, *graph.Graph)) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = stub } -func (fake *FakeChangeProcessor) ProcessReturns(result1 bool, result2 *graph.Graph) { +func (fake *FakeChangeProcessor) ProcessReturns(result1 state.ChangeType, result2 *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = nil fake.processReturns = struct { - result1 bool + result1 state.ChangeType result2 *graph.Graph }{result1, result2} } -func (fake *FakeChangeProcessor) ProcessReturnsOnCall(i int, result1 bool, result2 *graph.Graph) { +func (fake *FakeChangeProcessor) ProcessReturnsOnCall(i int, result1 state.ChangeType, result2 *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = nil if fake.processReturnsOnCall == nil { fake.processReturnsOnCall = make(map[int]struct { - result1 bool + result1 state.ChangeType result2 *graph.Graph }) } fake.processReturnsOnCall[i] = struct { - result1 bool + result1 state.ChangeType result2 *graph.Graph 
}{result1, result2} } diff --git a/internal/mode/static/state/store.go b/internal/mode/static/state/store.go index 5e58311402..ac0659fe7c 100644 --- a/internal/mode/static/state/store.go +++ b/internal/mode/static/state/store.go @@ -4,6 +4,7 @@ import ( "fmt" apiv1 "k8s.io/api/core/v1" + discoveryV1 "k8s.io/api/discovery/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -134,7 +135,7 @@ type changeTrackingUpdater struct { extractGVK extractGVKFunc supportedGVKs gvkList - changed bool + changeType ChangeType } func newChangeTrackingUpdater( @@ -167,6 +168,7 @@ func newChangeTrackingUpdater( extractGVK: extractGVK, supportedGVKs: supportedGVKs, stateChangedPredicates: stateChangedPredicates, + changeType: NoChange, } } @@ -200,7 +202,7 @@ func (s *changeTrackingUpdater) Upsert(obj client.Object) { changingUpsert := s.upsert(obj) - s.changed = s.changed || changingUpsert + s.setChangeType(obj, changingUpsert) } func (s *changeTrackingUpdater) delete(objType client.Object, nsname types.NamespacedName) (changed bool) { @@ -227,15 +229,30 @@ func (s *changeTrackingUpdater) Delete(objType client.Object, nsname types.Names changingDelete := s.delete(objType, nsname) - s.changed = s.changed || changingDelete + s.setChangeType(objType, changingDelete) } -// getAndResetChangedStatus returns true if the previous updates (Upserts/Deletes) require an update of -// the configuration of the data plane. It also resets the changed status to false. -func (s *changeTrackingUpdater) getAndResetChangedStatus() bool { - changed := s.changed - s.changed = false - return changed +// getAndResetChangedStatus returns the type of change that occurred based on the previous updates (Upserts/Deletes). +// It also resets the changed status to NoChange. 
+func (s *changeTrackingUpdater) getAndResetChangedStatus() ChangeType { + changeType := s.changeType + s.changeType = NoChange + return changeType +} + +// setChangeType determines and sets the type of change that occurred. +// - if no change occurred on this object, then keep the changeType as-is (could've been set by another object event) +// - if changeType is already a ClusterStateChange, then we don't need to update the value +// - otherwise, if we are processing an Endpoint update, then this is an EndpointsOnlyChange changeType +// - otherwise, this is a different object, and is a ClusterStateChange changeType +func (s *changeTrackingUpdater) setChangeType(obj client.Object, changed bool) { + if changed && s.changeType != ClusterStateChange { + if _, ok := obj.(*discoveryV1.EndpointSlice); ok { + s.changeType = EndpointsOnlyChange + } else { + s.changeType = ClusterStateChange + } + } } type upsertValidatorFunc func(obj client.Object) error diff --git a/internal/mode/static/state/store_test.go b/internal/mode/static/state/store_test.go new file mode 100644 index 0000000000..b4184d3154 --- /dev/null +++ b/internal/mode/static/state/store_test.go @@ -0,0 +1,53 @@ +package state + +import ( + "testing" + + . "github.com/onsi/gomega" + discoveryV1 "k8s.io/api/discovery/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + v1 "sigs.k8s.io/gateway-api/apis/v1" +) + +func TestSetChangeType(t *testing.T) { + ctu := newChangeTrackingUpdater(nil, nil) + + // Order matters for these cases. 
+ tests := []struct { + obj client.Object + name string + exp ChangeType + changed bool + }{ + { + name: "no change", + exp: NoChange, + }, + { + name: "endpoint object", + obj: &discoveryV1.EndpointSlice{}, + changed: true, + exp: EndpointsOnlyChange, + }, + { + name: "non-endpoint object", + obj: &v1.HTTPRoute{}, + changed: true, + exp: ClusterStateChange, + }, + { + name: "changeType was previously set to ClusterStateChange", + obj: &discoveryV1.EndpointSlice{}, + changed: true, + exp: ClusterStateChange, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + ctu.setChangeType(test.obj, test.changed) + g.Expect(ctu.changeType).To(Equal(test.exp)) + }) + } +} diff --git a/site/content/overview/gateway-api-compatibility.md b/site/content/overview/gateway-api-compatibility.md index b10133fe01..7a22a7cdd7 100644 --- a/site/content/overview/gateway-api-compatibility.md +++ b/site/content/overview/gateway-api-compatibility.md @@ -1,7 +1,7 @@ --- title: "Gateway API Compatibility" description: "Learn which Gateway API resources NGINX Gateway Fabric supports and the extent of that support." -weight: 700 +weight: 200 toc: true docs: "DOCS-000" --- diff --git a/site/content/overview/gateway-architecture.md b/site/content/overview/gateway-architecture.md index d34e1c686a..d96a5ee197 100644 --- a/site/content/overview/gateway-architecture.md +++ b/site/content/overview/gateway-architecture.md @@ -98,6 +98,10 @@ The previous diagram depicts NGINX Gateway Fabric using NGINX Open Source. NGINX - An _admin_ can connect to the NGINX Plus API using port 8765. NGINX only allows connections from localhost. +## Updating upstream servers + +The normal process to update any changes to NGINX is to write the configuration files and reload NGINX. 
However, when using NGINX Plus, we can take advantage of the [NGINX Plus API](http://nginx.org/en/docs/http/ngx_http_api_module.html) to limit the number of reloads triggered when making changes to NGINX. Specifically, when the endpoints of an application in Kubernetes change (such as scaling up or down), the NGINX Plus API is used to update the upstream servers in NGINX with the new endpoints without a reload. This reduces the potential for a disruption that could occur when reloading. + ## Pod readiness The `nginx-gateway` container includes a readiness endpoint available through the path `/readyz`. A [readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) periodically checks the endpoint on startup, returning a `200 OK` response when the pod can accept traffic for the data plane. Once the control plane successfully starts, the pod becomes ready. diff --git a/site/content/overview/nginx-plus.md b/site/content/overview/nginx-plus.md new file mode 100644 index 0000000000..aa54b09687 --- /dev/null +++ b/site/content/overview/nginx-plus.md @@ -0,0 +1,15 @@ +--- +title: "Advanced features of NGINX Plus" +weight: 300 +toc: true +docs: "DOCS-000" +--- + +NGINX Gateway Fabric can use NGINX Open Source or NGINX Plus as its data plane. [NGINX Plus](https://www.nginx.com/products/nginx/) is the closed source, commercial version of NGINX. Using NGINX Plus as the data plane offers additional benefits compared to the open source version. + +## Benefits of NGINX Plus + +- **Robust metrics**: A plethora of [additional Prometheus metrics](https://github.com/nginxinc/nginx-prometheus-exporter#metrics-for-nginx-plus) are available. +- **Live activity monitoring**: The [NGINX Plus dashboard]({{< relref "/how-to/monitoring/dashboard.md" >}}) shows real-time metrics and information about your server infrastructure.
+- **Dynamic upstream configuration**: NGINX Plus can dynamically reconfigure upstream servers when applications in Kubernetes scale up and down, preventing the need for an NGINX reload. +- **Support**: With an NGINX Plus license, you can take advantage of full [support](https://www.nginx.com/support/) from NGINX, Inc. diff --git a/site/content/overview/resource-validation.md b/site/content/overview/resource-validation.md index 2519aeef20..2c3c52a2f4 100644 --- a/site/content/overview/resource-validation.md +++ b/site/content/overview/resource-validation.md @@ -1,6 +1,6 @@ --- title: "Resource Validation" -weight: 800 +weight: 400 toc: true docs: "DOCS-000" ---