From cf970d7b6085939aa4ad540cabc8307cfed5aa0a Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 21 Aug 2023 16:29:34 -0400 Subject: [PATCH 01/16] Reduce required type arguments for DecodedResource --- internal/catalog/exports.go | 2 +- .../controllers/failover/controller.go | 20 +++++++-------- .../mappers/failovermapper/failover_mapper.go | 2 +- .../failovermapper/failover_mapper_test.go | 6 ++--- .../internal/types/failover_policy_test.go | 12 ++++----- internal/resource/decode.go | 25 +++++++------------ internal/resource/decode_test.go | 8 +++--- internal/resource/resourcetest/decode.go | 7 ++---- 8 files changed, 36 insertions(+), 46 deletions(-) diff --git a/internal/catalog/exports.go b/internal/catalog/exports.go index 4019fbeb51ac..566c2e2b6edf 100644 --- a/internal/catalog/exports.go +++ b/internal/catalog/exports.go @@ -118,7 +118,7 @@ func SimplifyFailoverPolicy(svc *pbcatalog.Service, failover *pbcatalog.Failover // FailoverPolicyMapper maintains the bidirectional tracking relationship of a // FailoverPolicy to the Services related to it. type FailoverPolicyMapper interface { - TrackFailover(failover *resource.DecodedResource[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy]) + TrackFailover(failover *resource.DecodedResource[*pbcatalog.FailoverPolicy]) UntrackFailover(failoverID *pbresource.ID) FailoverIDsByService(svcID *pbresource.ID) []*pbresource.ID } diff --git a/internal/catalog/internal/controllers/failover/controller.go b/internal/catalog/internal/controllers/failover/controller.go index ea6efa992d9f..9accb62aa447 100644 --- a/internal/catalog/internal/controllers/failover/controller.go +++ b/internal/catalog/internal/controllers/failover/controller.go @@ -20,7 +20,7 @@ type FailoverMapper interface { // TrackFailover extracts all Service references from the provided // FailoverPolicy and indexes them so that MapService can turn Service // events into FailoverPolicy events properly. - TrackFailover(failover *resource.DecodedResource[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy]) + TrackFailover(failover *resource.DecodedResource[*pbcatalog.FailoverPolicy]) // UntrackFailover forgets the links inserted by TrackFailover for the // provided FailoverPolicyID. @@ -86,7 +86,7 @@ func (r *failoverPolicyReconciler) Reconcile(ctx context.Context, rt controller. rt.Logger.Error("error retrieving corresponding service", "error", err) return err } - destServices := make(map[resource.ReferenceKey]*resource.DecodedResource[pbcatalog.Service, *pbcatalog.Service]) + destServices := make(map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service]) if service != nil { destServices[resource.NewReferenceKey(serviceID)] = service } @@ -148,18 +148,18 @@ func (r *failoverPolicyReconciler) Reconcile(ctx context.Context, rt controller. 
return nil } -func getFailoverPolicy(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*resource.DecodedResource[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy], error) { - return resource.GetDecodedResource[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](ctx, rt.Client, id) +func getFailoverPolicy(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*resource.DecodedResource[*pbcatalog.FailoverPolicy], error) { + return resource.GetDecodedResource[*pbcatalog.FailoverPolicy](ctx, rt.Client, id) } -func getService(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*resource.DecodedResource[pbcatalog.Service, *pbcatalog.Service], error) { - return resource.GetDecodedResource[pbcatalog.Service, *pbcatalog.Service](ctx, rt.Client, id) +func getService(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*resource.DecodedResource[*pbcatalog.Service], error) { + return resource.GetDecodedResource[*pbcatalog.Service](ctx, rt.Client, id) } func computeNewStatus( - failoverPolicy *resource.DecodedResource[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy], - service *resource.DecodedResource[pbcatalog.Service, *pbcatalog.Service], - destServices map[resource.ReferenceKey]*resource.DecodedResource[pbcatalog.Service, *pbcatalog.Service], + failoverPolicy *resource.DecodedResource[*pbcatalog.FailoverPolicy], + service *resource.DecodedResource[*pbcatalog.Service], + destServices map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service], ) *pbresource.Status { if service == nil { return &pbresource.Status{ @@ -238,7 +238,7 @@ func computeNewStatus( func serviceHasPort( dest *pbcatalog.FailoverDestination, - destServices map[resource.ReferenceKey]*resource.DecodedResource[pbcatalog.Service, *pbcatalog.Service], + destServices map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service], ) *pbresource.Condition { key := resource.NewReferenceKey(dest.Ref) destService, ok := destServices[key] diff --git a/internal/catalog/internal/mappers/failovermapper/failover_mapper.go b/internal/catalog/internal/mappers/failovermapper/failover_mapper.go index 5c23a1bfe3a1..4ae6776cb66c 100644 --- a/internal/catalog/internal/mappers/failovermapper/failover_mapper.go +++ b/internal/catalog/internal/mappers/failovermapper/failover_mapper.go @@ -31,7 +31,7 @@ func New() *Mapper { // TrackFailover extracts all Service references from the provided // FailoverPolicy and indexes them so that MapService can turn Service events // into FailoverPolicy events properly. -func (m *Mapper) TrackFailover(failover *resource.DecodedResource[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy]) { +func (m *Mapper) TrackFailover(failover *resource.DecodedResource[*pbcatalog.FailoverPolicy]) { destRefs := failover.Data.GetUnderlyingDestinationRefs() destRefs = append(destRefs, &pbresource.Reference{ Type: types.ServiceType, diff --git a/internal/catalog/internal/mappers/failovermapper/failover_mapper_test.go b/internal/catalog/internal/mappers/failovermapper/failover_mapper_test.go index 8a4ac2d7227d..048f444eca61 100644 --- a/internal/catalog/internal/mappers/failovermapper/failover_mapper_test.go +++ b/internal/catalog/internal/mappers/failovermapper/failover_mapper_test.go @@ -59,7 +59,7 @@ func TestMapper_Tracking(t *testing.T) { }). 
Build() rtest.ValidateAndNormalize(t, registry, fail1) - failDec1 := rtest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, fail1) + failDec1 := rtest.MustDecode[*pbcatalog.FailoverPolicy](t, fail1) fail2 := rtest.Resource(types.FailoverPolicyType, "www"). WithData(t, &pbcatalog.FailoverPolicy{ @@ -72,7 +72,7 @@ func TestMapper_Tracking(t *testing.T) { }). Build() rtest.ValidateAndNormalize(t, registry, fail2) - failDec2 := rtest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, fail2) + failDec2 := rtest.MustDecode[*pbcatalog.FailoverPolicy](t, fail2) fail1_updated := rtest.Resource(types.FailoverPolicyType, "api"). WithData(t, &pbcatalog.FailoverPolicy{ @@ -84,7 +84,7 @@ func TestMapper_Tracking(t *testing.T) { }). Build() rtest.ValidateAndNormalize(t, registry, fail1_updated) - failDec1_updated := rtest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, fail1_updated) + failDec1_updated := rtest.MustDecode[*pbcatalog.FailoverPolicy](t, fail1_updated) m := New() diff --git a/internal/catalog/internal/types/failover_policy_test.go b/internal/catalog/internal/types/failover_policy_test.go index 8f2ad9717298..41bfd3d82770 100644 --- a/internal/catalog/internal/types/failover_policy_test.go +++ b/internal/catalog/internal/types/failover_policy_test.go @@ -31,7 +31,7 @@ func TestMutateFailoverPolicy(t *testing.T) { err := MutateFailoverPolicy(res) - got := resourcetest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, res) + got := resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, res) if tc.expectErr == "" { require.NoError(t, err) @@ -162,13 +162,13 @@ func TestValidateFailoverPolicy(t *testing.T) { require.NoError(t, MutateFailoverPolicy(res)) // Verify that mutate didn't actually change the object. - got := resourcetest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, res) + got := resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, res) prototest.AssertDeepEqual(t, tc.failover, got.Data) err := ValidateFailoverPolicy(res) // Verify that validate didn't actually change the object. - got = resourcetest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, res) + got = resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, res) prototest.AssertDeepEqual(t, tc.failover, got.Data) if tc.expectErr == "" { @@ -359,9 +359,9 @@ func TestSimplifyFailoverPolicy(t *testing.T) { resourcetest.ValidateAndNormalize(t, registry, tc.failover) resourcetest.ValidateAndNormalize(t, registry, tc.expect) - svc := resourcetest.MustDecode[pbcatalog.Service, *pbcatalog.Service](t, tc.svc) - failover := resourcetest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, tc.failover) - expect := resourcetest.MustDecode[pbcatalog.FailoverPolicy, *pbcatalog.FailoverPolicy](t, tc.expect) + svc := resourcetest.MustDecode[*pbcatalog.Service](t, tc.svc) + failover := resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, tc.failover) + expect := resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, tc.expect) inputFailoverCopy := proto.Clone(failover.Data).(*pbcatalog.FailoverPolicy) diff --git a/internal/resource/decode.go b/internal/resource/decode.go index 7b1fb7b36450..c610898ca3c2 100644 --- a/internal/resource/decode.go +++ b/internal/resource/decode.go @@ -15,27 +15,23 @@ import ( // DecodedResource is a generic holder to contain an original Resource and its // decoded contents. 
-type DecodedResource[V any, PV interface { - proto.Message - *V -}] struct { +type DecodedResource[T proto.Message] struct { Resource *pbresource.Resource - Data PV + Data T } // Decode will generically decode the provided resource into a 2-field // structure that holds onto the original Resource and the decoded contents. // // Returns an ErrDataParse on unmarshalling errors. -func Decode[V any, PV interface { - proto.Message - *V -}](res *pbresource.Resource) (*DecodedResource[V, PV], error) { - data := PV(new(V)) +func Decode[T proto.Message](res *pbresource.Resource) (*DecodedResource[T], error) { + var zero T + data := zero.ProtoReflect().New().Interface().(T) + if err := res.Data.UnmarshalTo(data); err != nil { return nil, NewErrDataParse(data, err) } - return &DecodedResource[V, PV]{ + return &DecodedResource[T]{ Resource: res, Data: data, }, nil @@ -43,10 +39,7 @@ func Decode[V any, PV interface { // GetDecodedResource will generically read the requested resource using the // client and either return nil on a NotFound or decode the response value. -func GetDecodedResource[V any, PV interface { - proto.Message - *V -}](ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID) (*DecodedResource[V, PV], error) { +func GetDecodedResource[T proto.Message](ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID) (*DecodedResource[T], error) { rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: id}) switch { case status.Code(err) == codes.NotFound: @@ -55,5 +48,5 @@ func GetDecodedResource[V any, PV interface { return nil, err } - return Decode[V, PV](rsp.Resource) + return Decode[T](rsp.Resource) } diff --git a/internal/resource/decode_test.go b/internal/resource/decode_test.go index 31ebe47c64bf..17c1bd7f1b07 100644 --- a/internal/resource/decode_test.go +++ b/internal/resource/decode_test.go @@ -34,7 +34,7 @@ func TestGetDecodedResource(t *testing.T) { } testutil.RunStep(t, "not found", func(t *testing.T) { - got, err := resource.GetDecodedResource[pbdemo.Artist, *pbdemo.Artist](ctx, client, babypantsID) + got, err := resource.GetDecodedResource[*pbdemo.Artist](ctx, client, babypantsID) require.NoError(t, err) require.Nil(t, got) }) @@ -47,7 +47,7 @@ func TestGetDecodedResource(t *testing.T) { WithData(t, data). 
Write(t, client) - got, err := resource.GetDecodedResource[pbdemo.Artist, *pbdemo.Artist](ctx, client, babypantsID) + got, err := resource.GetDecodedResource[*pbdemo.Artist](ctx, client, babypantsID) require.NoError(t, err) require.NotNil(t, got) @@ -84,7 +84,7 @@ func TestDecode(t *testing.T) { }, } - dec, err := resource.Decode[pbdemo.Artist, *pbdemo.Artist](foo) + dec, err := resource.Decode[*pbdemo.Artist](foo) require.NoError(t, err) prototest.AssertDeepEqual(t, foo, dec.Resource) @@ -107,7 +107,7 @@ func TestDecode(t *testing.T) { }, } - _, err := resource.Decode[pbdemo.Artist, *pbdemo.Artist](foo) + _, err := resource.Decode[*pbdemo.Artist](foo) require.Error(t, err) }) } diff --git a/internal/resource/resourcetest/decode.go b/internal/resource/resourcetest/decode.go index 077bbc0dd514..d68fff865517 100644 --- a/internal/resource/resourcetest/decode.go +++ b/internal/resource/resourcetest/decode.go @@ -13,11 +13,8 @@ import ( "github.com/hashicorp/consul/proto-public/pbresource" ) -func MustDecode[V any, PV interface { - proto.Message - *V -}](t *testing.T, res *pbresource.Resource) *resource.DecodedResource[V, PV] { - dec, err := resource.Decode[V, PV](res) +func MustDecode[T proto.Message](t *testing.T, res *pbresource.Resource) *resource.DecodedResource[T] { + dec, err := resource.Decode[T](res) require.NoError(t, err) return dec } From 75bc5d8da4c48e7985f290ea2e8d62802b769aad Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Tue, 8 Aug 2023 10:14:42 -0500 Subject: [PATCH 02/16] mesh: add validation for the new pbmesh resources --- .../mesh/internal/types/computed_routes.go | 47 +- .../internal/types/computed_routes_test.go | 79 +++ .../mesh/internal/types/destination_policy.go | 142 ++++- .../internal/types/destination_policy_test.go | 413 +++++++++++++ internal/mesh/internal/types/grpc_route.go | 185 +++++- .../mesh/internal/types/grpc_route_test.go | 92 +++ internal/mesh/internal/types/http_route.go | 550 +++++++++++++++++- .../mesh/internal/types/http_route_test.go | 236 ++++++++ .../internal/types/proxy_configuration.go | 5 +- internal/mesh/internal/types/tcp_route.go | 69 ++- .../mesh/internal/types/tcp_route_test.go | 79 +++ internal/mesh/internal/types/util.go | 52 ++ internal/mesh/internal/types/xroute.go | 20 + proto-public/pbmesh/v1alpha1/xroute_addons.go | 91 +++ .../pbmesh/v1alpha1/xroute_addons_test.go | 174 ++++++ 15 files changed, 2223 insertions(+), 11 deletions(-) create mode 100644 internal/mesh/internal/types/computed_routes_test.go create mode 100644 internal/mesh/internal/types/destination_policy_test.go create mode 100644 internal/mesh/internal/types/grpc_route_test.go create mode 100644 internal/mesh/internal/types/http_route_test.go create mode 100644 internal/mesh/internal/types/tcp_route_test.go create mode 100644 internal/mesh/internal/types/util.go create mode 100644 internal/mesh/internal/types/xroute.go create mode 100644 proto-public/pbmesh/v1alpha1/xroute_addons.go create mode 100644 proto-public/pbmesh/v1alpha1/xroute_addons_test.go diff --git a/internal/mesh/internal/types/computed_routes.go b/internal/mesh/internal/types/computed_routes.go index 8314a2cc2e28..c43627c896c4 100644 --- a/internal/mesh/internal/types/computed_routes.go +++ b/internal/mesh/internal/types/computed_routes.go @@ -4,6 +4,8 @@ package types import ( + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -27,6 +29,49 
@@ func RegisterComputedRoutes(r resource.Registry) { r.Register(resource.Registration{ Type: ComputedRoutesV1Alpha1Type, Proto: &pbmesh.ComputedRoutes{}, - Validate: nil, + Validate: ValidateComputedRoutes, }) } + +func ValidateComputedRoutes(res *pbresource.Resource) error { + var config pbmesh.ComputedRoutes + + if err := res.Data.UnmarshalTo(&config); err != nil { + return resource.NewErrDataParse(&config, err) + } + + var merr error + + if len(config.PortedConfigs) == 0 { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "ported_configs", + Wrapped: resource.ErrEmpty, + }) + } + + // TODO(rb): do more elaborate validation + + for port, pmc := range config.PortedConfigs { + wrapErr := func(err error) error { + return resource.ErrInvalidMapValue{ + Map: "ported_configs", + Key: port, + Wrapped: err, + } + } + if pmc.Config == nil { + merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{ + Name: "config", + Wrapped: resource.ErrEmpty, + })) + } + if len(pmc.Targets) == 0 { + merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{ + Name: "targets", + Wrapped: resource.ErrEmpty, + })) + } + } + + return merr +} diff --git a/internal/mesh/internal/types/computed_routes_test.go b/internal/mesh/internal/types/computed_routes_test.go new file mode 100644 index 000000000000..0f5dacc102e8 --- /dev/null +++ b/internal/mesh/internal/types/computed_routes_test.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestValidateComputedRoutes(t *testing.T) { + type testcase struct { + routes *pbmesh.ComputedRoutes + expectErr string + } + + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(ComputedRoutesType, "api"). + WithData(t, tc.routes). + Build() + + err := ValidateComputedRoutes(res) + + // Verify that validate didn't actually change the object. 
+ got := resourcetest.MustDecode[pbmesh.ComputedRoutes, *pbmesh.ComputedRoutes](t, res) + prototest.AssertDeepEqual(t, tc.routes, got.Data) + + if tc.expectErr == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expectErr) + } + } + + cases := map[string]testcase{ + "empty": { + routes: &pbmesh.ComputedRoutes{}, + expectErr: `invalid "ported_configs" field: cannot be empty`, + }, + "empty targets": { + routes: &pbmesh.ComputedRoutes{ + PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{ + "http": { + Config: &pbmesh.ComputedPortRoutes_Tcp{ + Tcp: &pbmesh.InterpretedTCPRoute{}, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within ported_configs: invalid "targets" field: cannot be empty`, + }, + "valid": { + routes: &pbmesh.ComputedRoutes{ + PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{ + "http": { + Config: &pbmesh.ComputedPortRoutes_Tcp{ + Tcp: &pbmesh.InterpretedTCPRoute{}, + }, + Targets: map[string]*pbmesh.BackendTargetDetails{ + "foo": {}, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/internal/mesh/internal/types/destination_policy.go b/internal/mesh/internal/types/destination_policy.go index e3f6bfbd0120..d9f076add2dd 100644 --- a/internal/mesh/internal/types/destination_policy.go +++ b/internal/mesh/internal/types/destination_policy.go @@ -4,6 +4,11 @@ package types import ( + "errors" + "fmt" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -27,6 +32,141 @@ func RegisterDestinationPolicy(r resource.Registry) { r.Register(resource.Registration{ Type: DestinationPolicyV1Alpha1Type, Proto: &pbmesh.DestinationPolicy{}, - Validate: nil, + Validate: ValidateDestinationPolicy, }) } + +func ValidateDestinationPolicy(res *pbresource.Resource) error { + var policy pbmesh.DestinationPolicy + + if err := res.Data.UnmarshalTo(&policy); err != nil { + return resource.NewErrDataParse(&policy, err) + } + + var merr error + + if len(policy.PortConfigs) == 0 { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "port_configs", + Wrapped: resource.ErrEmpty, + }) + } + + for port, pc := range policy.PortConfigs { + wrapErr := func(err error) error { + return resource.ErrInvalidMapValue{ + Map: "port_configs", + Key: port, + Wrapped: err, + } + } + + if dur := pc.ConnectTimeout.AsDuration(); dur < 0 { + merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{ + Name: "connect_timeout", + Wrapped: fmt.Errorf("'%v', must be >= 0", dur), + })) + } + if dur := pc.RequestTimeout.AsDuration(); dur < 0 { + merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{ + Name: "request_timeout", + Wrapped: fmt.Errorf("'%v', must be >= 0", dur), + })) + } + + if pc.LoadBalancer != nil { + lb := pc.LoadBalancer + wrapLBErr := func(err error) error { + return wrapErr(resource.ErrInvalidMapValue{ + Map: "load_balancer", + Key: port, + Wrapped: err, + }) + } + + if lb.Policy != pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH && lb.Config != nil { + if _, ok := lb.Config.(*pbmesh.LoadBalancer_RingHashConfig); ok { + merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ + Name: "config", + Wrapped: fmt.Errorf("RingHashConfig specified for incompatible load balancing policy %q", lb.Policy), + })) + } + } + + if lb.Policy != 
pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_LEAST_REQUEST && lb.Config != nil { + if _, ok := lb.Config.(*pbmesh.LoadBalancer_LeastRequestConfig); ok { + merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ + Name: "config", + Wrapped: fmt.Errorf("LeastRequestConfig specified for incompatible load balancing policy %q", lb.Policy), + })) + } + } + + if !lb.Policy.IsHashBased() && len(lb.HashPolicies) > 0 { + merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ + Name: "hash_policies", + Wrapped: fmt.Errorf("hash_policies specified for non-hash-based policy %q", lb.Policy), + })) + } + + for i, hp := range lb.HashPolicies { + wrapHPErr := func(err error) error { + return wrapLBErr(resource.ErrInvalidListElement{ + Name: "hash_policies", + Index: i, + Wrapped: err, + }) + } + + hasField := (hp.Field != pbmesh.HashPolicyField_HASH_POLICY_FIELD_UNSPECIFIED) + + if hp.SourceIp { + if hasField { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "field", + Wrapped: fmt.Errorf("a single hash policy cannot hash both a source address and a %q", hp.Field), + })) + } + if hp.FieldValue != "" { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "field_value", + Wrapped: errors.New("cannot be specified when hashing source_ip"), + })) + } + } + + if hasField && hp.FieldValue == "" { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "field_value", + Wrapped: fmt.Errorf("field %q was specified without a field_value", hp.Field), + })) + } + if hp.FieldValue != "" && !hasField { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "field_value", + Wrapped: errors.New("requires a field to apply to"), + })) + } + if hp.CookieConfig != nil { + if hp.Field != pbmesh.HashPolicyField_HASH_POLICY_FIELD_COOKIE { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "cookie_config", + Wrapped: fmt.Errorf("incompatible with field %q", hp.Field), + })) + } + if hp.CookieConfig.Session && hp.CookieConfig.Ttl.AsDuration() != 0 { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "cookie_config", + Wrapped: resource.ErrInvalidField{ + Name: "ttl", + Wrapped: fmt.Errorf("a session cookie cannot have an associated TTL"), + }, + })) + } + } + } + } + } + + return merr +} diff --git a/internal/mesh/internal/types/destination_policy_test.go b/internal/mesh/internal/types/destination_policy_test.go new file mode 100644 index 000000000000..6654ae2aa7fb --- /dev/null +++ b/internal/mesh/internal/types/destination_policy_test.go @@ -0,0 +1,413 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/durationpb" + + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestValidateDestinationPolicy(t *testing.T) { + type testcase struct { + policy *pbmesh.DestinationPolicy + expectErr string + expectErrs []string + } + + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(DestinationPolicyType, "api"). + WithData(t, tc.policy). + Build() + + err := ValidateDestinationPolicy(res) + + // Verify that validate didn't actually change the object. 
+ got := resourcetest.MustDecode[pbmesh.DestinationPolicy, *pbmesh.DestinationPolicy](t, res) + prototest.AssertDeepEqual(t, tc.policy, got.Data) + + if tc.expectErr != "" && len(tc.expectErrs) > 0 { + t.Fatalf("cannot test singular and list errors at the same time") + } + + if tc.expectErr == "" && len(tc.expectErrs) == 0 { + require.NoError(t, err) + } else if tc.expectErr != "" { + testutil.RequireErrorContains(t, err, tc.expectErr) + } else { + for _, expectErr := range tc.expectErrs { + testutil.RequireErrorContains(t, err, expectErr) + } + } + } + + cases := map[string]testcase{ + // emptiness + "empty": { + policy: &pbmesh.DestinationPolicy{}, + expectErr: `invalid "port_configs" field: cannot be empty`, + }, + "good connect timeout": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + }, + }, + }, + }, + "bad connect timeout": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(-55 * time.Second), + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid "connect_timeout" field: '-55s', must be >= 0`, + }, + "good request timeout": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + RequestTimeout: durationpb.New(55 * time.Second), + }, + }, + }, + }, + "bad request timeout": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + RequestTimeout: durationpb.New(-55 * time.Second), + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid "request_timeout" field: '-55s', must be >= 0`, + }, + // load balancer + "lbpolicy: supported": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RANDOM, + }, + }, + }, + }, + }, + "lbpolicy: bad for least request config": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH, + Config: &pbmesh.LoadBalancer_LeastRequestConfig{ + LeastRequestConfig: &pbmesh.LeastRequestConfig{ + ChoiceCount: 10, + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: LeastRequestConfig specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_RING_HASH"`, + }, + "lbpolicy: bad for ring hash config": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_LEAST_REQUEST, + Config: &pbmesh.LoadBalancer_RingHashConfig{ + RingHashConfig: &pbmesh.RingHashConfig{ + MinimumRingSize: 1024, + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: RingHashConfig specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_LEAST_REQUEST"`, + }, + "lbpolicy: good for least request config": { + policy: 
&pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_LEAST_REQUEST, + Config: &pbmesh.LoadBalancer_LeastRequestConfig{ + LeastRequestConfig: &pbmesh.LeastRequestConfig{ + ChoiceCount: 10, + }, + }, + }, + }, + }, + }, + }, + "lbpolicy: good for ring hash config": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH, + Config: &pbmesh.LoadBalancer_RingHashConfig{ + RingHashConfig: &pbmesh.RingHashConfig{ + MinimumRingSize: 1024, + }, + }, + }, + }, + }, + }, + }, + "lbpolicy: empty policy with hash policy": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + HashPolicies: []*pbmesh.HashPolicy{ + {SourceIp: true}, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "hash_policies" field: hash_policies specified for non-hash-based policy "LOAD_BALANCER_POLICY_UNSPECIFIED"`, + }, + "lbconfig: cookie config with header policy": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_HEADER, + FieldValue: "x-user-id", + CookieConfig: &pbmesh.CookieConfig{ + Ttl: durationpb.New(10 * time.Second), + Path: "/root", + }, + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "cookie_config" field: incompatible with field "HASH_POLICY_FIELD_HEADER"`, + }, + "lbconfig: cannot generate session cookie with ttl": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_COOKIE, + FieldValue: "good-cookie", + CookieConfig: &pbmesh.CookieConfig{ + Session: true, + Ttl: durationpb.New(10 * time.Second), + }, + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "cookie_config" field: invalid "ttl" field: a session cookie cannot have an associated TTL`, + }, + "lbconfig: valid cookie policy": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_COOKIE, + FieldValue: "good-cookie", + CookieConfig: &pbmesh.CookieConfig{ + Ttl: 
durationpb.New(10 * time.Second), + Path: "/oven", + }, + }, + }, + }, + }, + }, + }, + }, + "lbconfig: supported match field": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_HEADER, + FieldValue: "X-Consul-Token", + }, + }, + }, + }, + }, + }, + }, + "lbconfig: cannot match on source address and custom field": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_HEADER, + SourceIp: true, + }, + }, + }, + }, + }, + }, + expectErrs: []string{ + `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field" field: a single hash policy cannot hash both a source address and a "HASH_POLICY_FIELD_HEADER"`, + `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: field "HASH_POLICY_FIELD_HEADER" was specified without a field_value`, + }, + }, + "lbconfig: matchvalue not compatible with source address": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + FieldValue: "X-Consul-Token", + SourceIp: true, + }, + }, + }, + }, + }, + }, + expectErrs: []string{ + `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: cannot be specified when hashing source_ip`, + `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, + }, + }, + "lbconfig: field without match value": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_HEADER, + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: field "HASH_POLICY_FIELD_HEADER" was specified without a field_value`, + }, + "lbconfig: matchvalue without field": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + FieldValue: 
"my-cookie", + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, + }, + "lbconfig: ring hash kitchen sink": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH, + Config: &pbmesh.LoadBalancer_RingHashConfig{ + RingHashConfig: &pbmesh.RingHashConfig{ + MaximumRingSize: 10, + MinimumRingSize: 2, + }, + }, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_COOKIE, + FieldValue: "my-cookie", + }, + { + Field: pbmesh.HashPolicyField_HASH_POLICY_FIELD_HEADER, + FieldValue: "alt-header", + Terminal: true, + }, + }, + }, + }, + }, + }, + }, + "lbconfig: least request kitchen sink": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_LEAST_REQUEST, + Config: &pbmesh.LoadBalancer_LeastRequestConfig{ + LeastRequestConfig: &pbmesh.LeastRequestConfig{ + ChoiceCount: 10, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/internal/mesh/internal/types/grpc_route.go b/internal/mesh/internal/types/grpc_route.go index cdd4669bca6f..eb12fd733ba4 100644 --- a/internal/mesh/internal/types/grpc_route.go +++ b/internal/mesh/internal/types/grpc_route.go @@ -4,6 +4,10 @@ package types import ( + "errors" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -25,8 +29,183 @@ var ( func RegisterGRPCRoute(r resource.Registry) { r.Register(resource.Registration{ - Type: GRPCRouteV1Alpha1Type, - Proto: &pbmesh.GRPCRoute{}, - Validate: nil, + Type: GRPCRouteV1Alpha1Type, + Proto: &pbmesh.GRPCRoute{}, + // TODO(rb): normalize parent/backend ref tenancies in a Mutate hook + Validate: ValidateGRPCRoute, }) } + +func ValidateGRPCRoute(res *pbresource.Resource) error { + var route pbmesh.GRPCRoute + + // TODO(rb):sync common stuff from HTTPRoute + + if err := res.Data.UnmarshalTo(&route); err != nil { + return resource.NewErrDataParse(&route, err) + } + + var merr error + if err := validateParentRefs(route.ParentRefs); err != nil { + merr = multierror.Append(merr, err) + } + + if len(route.Hostnames) > 0 { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "hostnames", + Wrapped: errors.New("should not populate hostnames"), + }) + } + + for i, rule := range route.Rules { + wrapRuleErr := func(err error) error { + return resource.ErrInvalidListElement{ + Name: "rules", + Index: i, + Wrapped: err, + } + } + + // TODO(rb): port a bunch of validation from ServiceRouterConfigEntry.Validate + + for j, match := range rule.Matches { + wrapMatchErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "matches", + Index: j, + Wrapped: err, + }) + } + + if match.Method != nil && match.Method.Type == pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED { + merr = multierror.Append(merr, 
wrapMatchErr( + resource.ErrInvalidField{ + Name: "method", + Wrapped: resource.ErrMissing, + }, + )) + } + + for k, header := range match.Headers { + wrapMatchHeaderErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "headers", + Index: k, + Wrapped: err, + }) + } + + if header.Type == pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED { + merr = multierror.Append(merr, wrapMatchHeaderErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: resource.ErrMissing, + }, + )) + } + } + } + + for j, filter := range rule.Filters { + wrapFilterErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "filters", + Index: j, + Wrapped: err, + }) + } + set := 0 + if filter.RequestHeaderModifier != nil { + set++ + } + if filter.ResponseHeaderModifier != nil { + set++ + } + if filter.UrlRewrite != nil { + set++ + if filter.UrlRewrite.PathPrefix == "" { + merr = multierror.Append(merr, wrapFilterErr( + resource.ErrInvalidField{ + Name: "url_rewrite", + Wrapped: resource.ErrInvalidField{ + Name: "path_prefix", + Wrapped: errors.New("field should not be empty if enclosing section is set"), + }, + }, + )) + } + } + if set != 1 { + merr = multierror.Append(merr, wrapFilterErr( + errors.New("exactly one of request_header_modifier, response_header_modifier, or url_rewrite"), + )) + } + } + + if len(rule.BackendRefs) == 0 { + /* + BackendRefs (optional)¶ + + BackendRefs defines API objects where matching requests should be + sent. If unspecified, the rule performs no forwarding. If + unspecified and no filters are specified that would result in a + response being sent, a 404 error code is returned. + */ + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "backend_refs", + Wrapped: resource.ErrEmpty, + }, + )) + } + for j, hbref := range rule.BackendRefs { + wrapBackendRefErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "backend_refs", + Index: j, + Wrapped: err, + }) + } + for _, err := range validateBackendRef(hbref.BackendRef) { + merr = multierror.Append(merr, wrapBackendRefErr( + resource.ErrInvalidField{ + Name: "backend_ref", + Wrapped: err, + }, + )) + } + + if len(hbref.Filters) > 0 { + merr = multierror.Append(merr, wrapBackendRefErr( + resource.ErrInvalidField{ + Name: "filters", + Wrapped: errors.New("filters are not supported at this level yet"), + }, + )) + } + } + + if rule.Timeouts != nil { + for _, err := range validateHTTPTimeouts(rule.Timeouts) { + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "timeouts", + Wrapped: err, + }, + )) + } + } + if rule.Retries != nil { + for _, err := range validateHTTPRetries(rule.Retries) { + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "retries", + Wrapped: err, + }, + )) + } + } + } + + return merr +} diff --git a/internal/mesh/internal/types/grpc_route_test.go b/internal/mesh/internal/types/grpc_route_test.go new file mode 100644 index 000000000000..81f795ad60cf --- /dev/null +++ b/internal/mesh/internal/types/grpc_route_test.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestValidateGRPCRoute(t *testing.T) { + type testcase struct { + route *pbmesh.GRPCRoute + expectErr string + } + + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(GRPCRouteType, "api"). + WithData(t, tc.route). + Build() + + err := ValidateGRPCRoute(res) + + // Verify that validate didn't actually change the object. + got := resourcetest.MustDecode[pbmesh.GRPCRoute, *pbmesh.GRPCRoute](t, res) + prototest.AssertDeepEqual(t, tc.route, got.Data) + + if tc.expectErr == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expectErr) + } + } + + cases := map[string]testcase{ + "hostnames not supported for services": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Hostnames: []string{"foo.local"}, + }, + expectErr: `invalid "hostnames" field: should not populate hostnames`, + }, + } + + // TODO(rb): add rest of tests for the matching logic + // TODO(rb): add rest of tests for the retry and timeout logic + + // Add common parent refs test cases. + for name, parentTC := range getXRouteParentRefTestCases() { + cases["parent-ref: "+name] = testcase{ + route: &pbmesh.GRPCRoute{ + ParentRefs: parentTC.refs, + }, + expectErr: parentTC.expectErr, + } + } + // add common backend ref test cases. + for name, backendTC := range getXRouteBackendRefTestCases() { + var refs []*pbmesh.GRPCBackendRef + for _, br := range backendTC.refs { + refs = append(refs, &pbmesh.GRPCBackendRef{ + BackendRef: br, + }) + } + cases["backend-ref: "+name] = testcase{ + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{ + {BackendRefs: refs}, + }, + }, + expectErr: backendTC.expectErr, + } + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 6f5f284ed901..1995b7e14dc2 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -4,6 +4,14 @@ package types import ( + "errors" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -27,6 +35,546 @@ func RegisterHTTPRoute(r resource.Registry) { r.Register(resource.Registration{ Type: HTTPRouteV1Alpha1Type, Proto: &pbmesh.HTTPRoute{}, - Validate: nil, + Mutate: MutateHTTPRoute, + Validate: ValidateHTTPRoute, }) } + +func MutateHTTPRoute(res *pbresource.Resource) error { + var route pbmesh.HTTPRoute + + if err := res.Data.UnmarshalTo(&route); err != nil { + return resource.NewErrDataParse(&route, err) + } + + changed := false + + for _, rule := range route.Rules { + for _, match := range rule.Matches { + if match.Method != "" { + norm := strings.ToUpper(match.Method) + if match.Method != norm { + match.Method = 
norm + changed = true + } + } + } + } + + // TODO(rb): normalize parent/backend ref tenancies + + if !changed { + return nil + } + + return res.Data.MarshalFrom(&route) +} + +func ValidateHTTPRoute(res *pbresource.Resource) error { + var route pbmesh.HTTPRoute + + if err := res.Data.UnmarshalTo(&route); err != nil { + return resource.NewErrDataParse(&route, err) + } + + var merr error + if err := validateParentRefs(route.ParentRefs); err != nil { + merr = multierror.Append(merr, err) + } + + if len(route.Hostnames) > 0 { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "hostnames", + Wrapped: errors.New("should not populate hostnames"), + }) + } + + for i, rule := range route.Rules { + wrapRuleErr := func(err error) error { + return resource.ErrInvalidListElement{ + Name: "rules", + Index: i, + Wrapped: err, + } + } + + // TODO(rb): port a bunch of validation from ServiceRouterConfigEntry.Validate + + for j, match := range rule.Matches { + wrapMatchErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "matches", + Index: j, + Wrapped: err, + }) + } + + if match.Path != nil { + wrapMatchPathErr := func(err error) error { + return wrapMatchErr(resource.ErrInvalidField{ + Name: "path", + Wrapped: err, + }) + } + switch match.Path.Type { + case pbmesh.PathMatchType_PATH_MATCH_TYPE_UNSPECIFIED: + merr = multierror.Append(merr, wrapMatchPathErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: resource.ErrMissing, + }, + )) + case pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT: + if !strings.HasPrefix(match.Path.Value, "/") { + merr = multierror.Append(merr, wrapMatchPathErr( + resource.ErrInvalidField{ + Name: "value", + Wrapped: fmt.Errorf("exact patch value does not start with '/': %q", match.Path.Value), + }, + )) + } + case pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX: + if !strings.HasPrefix(match.Path.Value, "/") { + merr = multierror.Append(merr, wrapMatchPathErr( + resource.ErrInvalidField{ + Name: "value", + Wrapped: fmt.Errorf("prefix patch value does not start with '/': %q", match.Path.Value), + }, + )) + } + } + } + + for k, hdr := range match.Headers { + wrapMatchHeaderErr := func(err error) error { + return wrapMatchErr(resource.ErrInvalidListElement{ + Name: "headers", + Index: k, + Wrapped: err, + }) + } + if hdr.Type == pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED { + merr = multierror.Append(merr, wrapMatchHeaderErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: resource.ErrMissing, + }), + ) + } + if hdr.Name == "" { + merr = multierror.Append(merr, wrapMatchHeaderErr( + resource.ErrInvalidField{ + Name: "name", + Wrapped: resource.ErrMissing, + }), + ) + } + } + + for k, qm := range match.QueryParams { + wrapMatchParamErr := func(err error) error { + return wrapMatchErr(resource.ErrInvalidListElement{ + Name: "query_params", + Index: k, + Wrapped: err, + }) + } + if qm.Type == pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_UNSPECIFIED { + merr = multierror.Append(merr, wrapMatchParamErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: resource.ErrMissing, + }), + ) + } + if qm.Name == "" { + merr = multierror.Append(merr, wrapMatchParamErr( + resource.ErrInvalidField{ + Name: "name", + Wrapped: resource.ErrMissing, + }), + ) + } + } + + // This is just a simple placeholder validation stolen from the + // config entry version. 
+ if match.Method != "" && !isValidHTTPMethod(match.Method) { + merr = multierror.Append(merr, wrapMatchErr( + resource.ErrInvalidField{ + Name: "method", + Wrapped: fmt.Errorf("not a valid http method: %q", match.Method), + }, + )) + } + } + + for j, filter := range rule.Filters { + wrapFilterErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "filters", + Index: j, + Wrapped: err, + }) + } + set := 0 + if filter.RequestHeaderModifier != nil { + set++ + } + if filter.ResponseHeaderModifier != nil { + set++ + } + if filter.UrlRewrite != nil { + set++ + if filter.UrlRewrite.PathPrefix == "" { + merr = multierror.Append(merr, wrapFilterErr( + resource.ErrInvalidField{ + Name: "url_rewrite", + Wrapped: resource.ErrInvalidField{ + Name: "path_prefix", + Wrapped: resource.ErrMissing, + }, + }, + )) + } + } + if set != 1 { + merr = multierror.Append(merr, wrapFilterErr( + errors.New("exactly one of request_header_modifier, response_header_modifier, or url_rewrite"), + )) + } + } + + if len(rule.BackendRefs) == 0 { + /* + BackendRefs (optional)¶ + + BackendRefs defines API objects where matching requests should be + sent. If unspecified, the rule performs no forwarding. If + unspecified and no filters are specified that would result in a + response being sent, a 404 error code is returned. + */ + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "backend_refs", + Wrapped: resource.ErrEmpty, + }, + )) + } + for j, hbref := range rule.BackendRefs { + wrapBackendRefErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "backend_refs", + Index: j, + Wrapped: err, + }) + } + + for _, err := range validateBackendRef(hbref.BackendRef) { + merr = multierror.Append(merr, wrapBackendRefErr( + resource.ErrInvalidField{ + Name: "backend_ref", + Wrapped: err, + }, + )) + } + + if len(hbref.Filters) > 0 { + merr = multierror.Append(merr, wrapBackendRefErr( + resource.ErrInvalidField{ + Name: "filters", + Wrapped: errors.New("filters are not supported at this level yet"), + }, + )) + } + } + + if rule.Timeouts != nil { + for _, err := range validateHTTPTimeouts(rule.Timeouts) { + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "timeouts", + Wrapped: err, + }, + )) + } + } + if rule.Retries != nil { + for _, err := range validateHTTPRetries(rule.Retries) { + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "retries", + Wrapped: err, + }, + )) + } + } + } + + return merr +} + +func validateHTTPTimeouts(timeouts *pbmesh.HTTPRouteTimeouts) []error { + if timeouts == nil { + return nil + } + + var errs []error + + if timeouts.Request != nil { + val := timeouts.Request.AsDuration() + if val < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "request", + Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), + }) + } + } + if timeouts.BackendRequest != nil { + val := timeouts.BackendRequest.AsDuration() + if val < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "request", + Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), + }) + } + } + if timeouts.Idle != nil { + val := timeouts.Idle.AsDuration() + if val < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "idle", + Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), + }) + } + } + + return errs +} + +func validateHTTPRetries(retries *pbmesh.HTTPRouteRetries) []error { + if retries == nil { + return nil + } + + var errs []error + + for i, 
condition := range retries.OnConditions { + if !isValidRetryCondition(condition) { + errs = append(errs, resource.ErrInvalidListElement{ + Name: "on_conditions", + Index: i, + Wrapped: fmt.Errorf("not a valid retry condition: %q", condition), + }) + } + } + + return errs +} + +func validateBackendRef(backendRef *pbmesh.BackendReference) []error { + var errs []error + if backendRef == nil { + errs = append(errs, resource.ErrMissing) + + } else if backendRef.Ref == nil { + errs = append(errs, resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrMissing, + }) + + } else { + if !IsServiceType(backendRef.Ref.Type) { + errs = append(errs, resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidReferenceType{ + AllowedType: catalog.ServiceType, + }, + }) + } + + if backendRef.Ref.Section != "" { + errs = append(errs, resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidField{ + Name: "section", + Wrapped: errors.New("section not supported for service backend refs"), + }, + }) + } + + if backendRef.Datacenter != "" { + errs = append(errs, resource.ErrInvalidField{ + Name: "datacenter", + Wrapped: errors.New("datacenter is not yet supported on backend refs"), + }) + } + } + return errs +} + +type portedRefKey struct { + Key resource.ReferenceKey + Port string +} + +func validateParentRefs(parentRefs []*pbmesh.ParentReference) error { + var merr error + if len(parentRefs) == 0 { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "parent_refs", + Wrapped: resource.ErrEmpty, + }) + } + + var ( + seen = make(map[portedRefKey]struct{}) + seenAny = make(map[resource.ReferenceKey][]string) + ) + for i, parent := range parentRefs { + wrapErr := func(err error) error { + return resource.ErrInvalidListElement{ + Name: "parent_refs", + Index: i, + Wrapped: err, + } + } + if parent.Ref == nil { + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrMissing, + }, + )) + } else { + if !IsServiceType(parent.Ref.Type) { + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidReferenceType{ + AllowedType: catalog.ServiceType, + }, + }, + )) + } + if parent.Ref.Section != "" { + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidField{ + Name: "section", + Wrapped: errors.New("section not supported for service parent refs"), + }, + }, + )) + } + + prk := portedRefKey{ + Key: resource.NewReferenceKey(parent.Ref), + Port: parent.Port, + } + + _, portExist := seen[prk] + + if parent.Port == "" { + coveredPorts, exactExists := seenAny[prk.Key] + + if portExist { // check for duplicate wild + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for wildcard port exists twice", + resource.ReferenceToString(parent.Ref), + ), + }, + )) + } else if exactExists { // check for existing exact + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for ports %v covered by wildcard port already", + resource.ReferenceToString(parent.Ref), + coveredPorts, + ), + }, + )) + } else { + seen[prk] = struct{}{} + } + + } else { + prkWild := prk + prkWild.Port = "" + _, wildExist := seen[prkWild] + + if portExist { // check for duplicate exact + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for 
port %q exists twice", + resource.ReferenceToString(parent.Ref), + parent.Port, + ), + }, + )) + } else if wildExist { // check for existing wild + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for port %q covered by wildcard port already", + resource.ReferenceToString(parent.Ref), + parent.Port, + ), + }, + )) + } else { + seen[prk] = struct{}{} + seenAny[prk.Key] = append(seenAny[prk.Key], parent.Port) + } + } + } + } + + return merr +} + +func isValidHTTPMethod(method string) bool { + switch method { + case http.MethodGet, + http.MethodHead, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + http.MethodConnect, + http.MethodOptions, + http.MethodTrace: + return true + default: + return false + } +} + +func isValidRetryCondition(retryOn string) bool { + switch retryOn { + case "5xx", + "gateway-error", + "reset", + "connect-failure", + "envoy-ratelimited", + "retriable-4xx", + "refused-stream", + "cancelled", + "deadline-exceeded", + "internal", + "resource-exhausted", + "unavailable": + return true + default: + return false + } +} diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go new file mode 100644 index 000000000000..ec512d41474f --- /dev/null +++ b/internal/mesh/internal/types/http_route_test.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" +) + +// TODO(rb): add mutation tests + +func TestValidateHTTPRoute(t *testing.T) { + type testcase struct { + route *pbmesh.HTTPRoute + expectErr string + } + + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(HTTPRouteType, "api"). + WithData(t, tc.route). + Build() + + err := ValidateHTTPRoute(res) + + // Verify that validate didn't actually change the object. + got := resourcetest.MustDecode[pbmesh.HTTPRoute, *pbmesh.HTTPRoute](t, res) + prototest.AssertDeepEqual(t, tc.route, got.Data) + + if tc.expectErr == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expectErr) + } + } + + cases := map[string]testcase{ + "hostnames not supported for services": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Hostnames: []string{"foo.local"}, + }, + expectErr: `invalid "hostnames" field: should not populate hostnames`, + }, + } + + // TODO(rb): add rest of tests for the matching logic + // TODO(rb): add rest of tests for the retry and timeout logic + + // Add common parent refs test cases. + for name, parentTC := range getXRouteParentRefTestCases() { + cases["parent-ref: "+name] = testcase{ + route: &pbmesh.HTTPRoute{ + ParentRefs: parentTC.refs, + }, + expectErr: parentTC.expectErr, + } + } + // add common backend ref test cases. 
+ for name, backendTC := range getXRouteBackendRefTestCases() { + var refs []*pbmesh.HTTPBackendRef + for _, br := range backendTC.refs { + refs = append(refs, &pbmesh.HTTPBackendRef{ + BackendRef: br, + }) + } + cases["backend-ref: "+name] = testcase{ + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{ + {BackendRefs: refs}, + }, + }, + expectErr: backendTC.expectErr, + } + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +type xRouteParentRefTestcase struct { + refs []*pbmesh.ParentReference + expectErr string +} + +func getXRouteParentRefTestCases() map[string]xRouteParentRefTestcase { + return map[string]xRouteParentRefTestcase{ + "no parent refs": { + expectErr: `invalid "parent_refs" field: cannot be empty`, + }, + "parent ref with nil ref": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", ""), + { + Ref: nil, + Port: "http", + }, + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: missing required field`, + }, + "parent ref with bad type ref": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", ""), + newParentRef(catalog.WorkloadType, "api", ""), + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: reference must have type catalog.v1alpha1.Service`, + }, + "parent ref with section": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", ""), + { + Ref: resourcetest.Resource(catalog.ServiceType, "web").Reference("section2"), + Port: "http", + }, + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: invalid "section" field: section not supported for service parent refs`, + }, + "duplicate exact parents": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", "http"), + newParentRef(catalog.ServiceType, "api", "http"), + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: parent ref "catalog.v1alpha1.Service/default.local.default/api" for port "http" exists twice`, + }, + "duplicate wild parents": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", ""), + newParentRef(catalog.ServiceType, "api", ""), + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: parent ref "catalog.v1alpha1.Service/default.local.default/api" for wildcard port exists twice`, + }, + "duplicate parents via exact+wild overlap": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", "http"), + newParentRef(catalog.ServiceType, "api", ""), + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: parent ref "catalog.v1alpha1.Service/default.local.default/api" for ports [http] covered by wildcard port already`, + }, + "good single parent ref": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", "http"), + }, + }, + "good muliple parent refs": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", "http"), + newParentRef(catalog.ServiceType, "web", ""), + }, + }, + } +} + +type xRouteBackendRefTestcase struct { + refs []*pbmesh.BackendReference + expectErr string +} + +func getXRouteBackendRefTestCases() map[string]xRouteBackendRefTestcase { + return map[string]xRouteBackendRefTestcase{ + "no backend refs": { + expectErr: `invalid "backend_refs" 
field: cannot be empty`, + }, + "backend ref with nil ref": { + refs: []*pbmesh.BackendReference{ + newBackendRef(catalog.ServiceType, "api", ""), + { + Ref: nil, + Port: "http", + }, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 1 of list "backend_refs": invalid "backend_ref" field: invalid "ref" field: missing required field`, + }, + "backend ref with bad type ref": { + refs: []*pbmesh.BackendReference{ + newBackendRef(catalog.ServiceType, "api", ""), + newBackendRef(catalog.WorkloadType, "api", ""), + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 1 of list "backend_refs": invalid "backend_ref" field: invalid "ref" field: reference must have type catalog.v1alpha1.Service`, + }, + "backend ref with section": { + refs: []*pbmesh.BackendReference{ + newBackendRef(catalog.ServiceType, "api", ""), + { + Ref: resourcetest.Resource(catalog.ServiceType, "web").Reference("section2"), + Port: "http", + }, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 1 of list "backend_refs": invalid "backend_ref" field: invalid "ref" field: invalid "section" field: section not supported for service backend refs`, + }, + "backend ref with datacenter": { + refs: []*pbmesh.BackendReference{ + newBackendRef(catalog.ServiceType, "api", ""), + { + Ref: newRef(catalog.ServiceType, "db"), + Port: "http", + Datacenter: "dc2", + }, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 1 of list "backend_refs": invalid "backend_ref" field: invalid "datacenter" field: datacenter is not yet supported on backend refs`, + }, + } +} + +func newRef(typ *pbresource.Type, name string) *pbresource.Reference { + return resourcetest.Resource(typ, name).Reference("") +} + +func newBackendRef(typ *pbresource.Type, name, port string) *pbmesh.BackendReference { + return &pbmesh.BackendReference{ + Ref: newRef(typ, name), + Port: port, + } +} + +func newParentRef(typ *pbresource.Type, name, port string) *pbmesh.ParentReference { + return &pbmesh.ParentReference{ + Ref: newRef(typ, name), + Port: port, + } +} diff --git a/internal/mesh/internal/types/proxy_configuration.go b/internal/mesh/internal/types/proxy_configuration.go index 29b43ce7a3f1..e85a00494f0f 100644 --- a/internal/mesh/internal/types/proxy_configuration.go +++ b/internal/mesh/internal/types/proxy_configuration.go @@ -25,8 +25,9 @@ var ( func RegisterProxyConfiguration(r resource.Registry) { r.Register(resource.Registration{ - Type: ProxyConfigurationV1Alpha1Type, - Proto: &pbmesh.ProxyConfiguration{}, + Type: ProxyConfigurationV1Alpha1Type, + Proto: &pbmesh.ProxyConfiguration{}, + // TODO(rb): add validation for proxy configuration Validate: nil, }) } diff --git a/internal/mesh/internal/types/tcp_route.go b/internal/mesh/internal/types/tcp_route.go index e217d07490bd..c7a6ccf87430 100644 --- a/internal/mesh/internal/types/tcp_route.go +++ b/internal/mesh/internal/types/tcp_route.go @@ -4,6 +4,8 @@ package types import ( + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -25,8 +27,69 @@ var ( func RegisterTCPRoute(r resource.Registry) { r.Register(resource.Registration{ - Type: TCPRouteV1Alpha1Type, - Proto: &pbmesh.TCPRoute{}, - Validate: nil, + Type: TCPRouteV1Alpha1Type, + Proto: &pbmesh.TCPRoute{}, + // TODO(rb): normalize parent/backend ref tenancies in a 
Mutate hook + Validate: ValidateTCPRoute, }) } + +func ValidateTCPRoute(res *pbresource.Resource) error { + var route pbmesh.TCPRoute + + if err := res.Data.UnmarshalTo(&route); err != nil { + return resource.NewErrDataParse(&route, err) + } + + var merr error + + if err := validateParentRefs(route.ParentRefs); err != nil { + merr = multierror.Append(merr, err) + } + + for i, rule := range route.Rules { + wrapRuleErr := func(err error) error { + return resource.ErrInvalidListElement{ + Name: "rules", + Index: i, + Wrapped: err, + } + } + + if len(rule.BackendRefs) == 0 { + /* + BackendRefs (optional)¶ + + BackendRefs defines API objects where matching requests should be + sent. If unspecified, the rule performs no forwarding. If + unspecified and no filters are specified that would result in a + response being sent, a 404 error code is returned. + */ + merr = multierror.Append(merr, wrapRuleErr( + resource.ErrInvalidField{ + Name: "backend_refs", + Wrapped: resource.ErrEmpty, + }, + )) + } + for j, hbref := range rule.BackendRefs { + wrapBackendRefErr := func(err error) error { + return wrapRuleErr(resource.ErrInvalidListElement{ + Name: "backend_refs", + Index: j, + Wrapped: err, + }) + } + for _, err := range validateBackendRef(hbref.BackendRef) { + merr = multierror.Append(merr, wrapBackendRefErr( + resource.ErrInvalidField{ + Name: "backend_ref", + Wrapped: err, + }, + )) + } + } + } + + return merr +} diff --git a/internal/mesh/internal/types/tcp_route_test.go b/internal/mesh/internal/types/tcp_route_test.go new file mode 100644 index 000000000000..0118fc010bd8 --- /dev/null +++ b/internal/mesh/internal/types/tcp_route_test.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestValidateTCPRoute(t *testing.T) { + type testcase struct { + route *pbmesh.TCPRoute + expectErr string + } + + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(TCPRouteType, "api"). + WithData(t, tc.route). + Build() + + err := ValidateTCPRoute(res) + + // Verify that validate didn't actually change the object. + got := resourcetest.MustDecode[pbmesh.TCPRoute, *pbmesh.TCPRoute](t, res) + prototest.AssertDeepEqual(t, tc.route, got.Data) + + if tc.expectErr == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expectErr) + } + } + + cases := map[string]testcase{} + + // Add common parent refs test cases. + for name, parentTC := range getXRouteParentRefTestCases() { + cases["parent-ref: "+name] = testcase{ + route: &pbmesh.TCPRoute{ + ParentRefs: parentTC.refs, + }, + expectErr: parentTC.expectErr, + } + } + // add common backend ref test cases. 
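+ // As in the HTTP route tests, each shared BackendReference is wrapped in a
+ // TCPBackendRef under a single rule before being validated.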
+ for name, backendTC := range getXRouteBackendRefTestCases() { + var refs []*pbmesh.TCPBackendRef + for _, br := range backendTC.refs { + refs = append(refs, &pbmesh.TCPBackendRef{ + BackendRef: br, + }) + } + cases["backend-ref: "+name] = testcase{ + route: &pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.TCPRouteRule{ + {BackendRefs: refs}, + }, + }, + expectErr: backendTC.expectErr, + } + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/internal/mesh/internal/types/util.go b/internal/mesh/internal/types/util.go new file mode 100644 index 000000000000..5a0e45d0201e --- /dev/null +++ b/internal/mesh/internal/types/util.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func IsRouteType(typ *pbresource.Type) bool { + switch { + case resource.EqualType(typ, HTTPRouteType), + resource.EqualType(typ, GRPCRouteType), + resource.EqualType(typ, TCPRouteType): + return true + } + return false +} + +func IsFailoverPolicyType(typ *pbresource.Type) bool { + switch { + case resource.EqualType(typ, catalog.FailoverPolicyType): + return true + } + return false +} + +func IsDestinationPolicyType(typ *pbresource.Type) bool { + switch { + case resource.EqualType(typ, DestinationPolicyType): + return true + } + return false +} + +func IsServiceType(typ *pbresource.Type) bool { + switch { + case resource.EqualType(typ, catalog.ServiceType): + return true + } + return false +} + +func IsComputedRoutesType(typ *pbresource.Type) bool { + switch { + case resource.EqualType(typ, ComputedRoutesType): + return true + } + return false +} diff --git a/internal/mesh/internal/types/xroute.go b/internal/mesh/internal/types/xroute.go new file mode 100644 index 000000000000..b7b190269623 --- /dev/null +++ b/internal/mesh/internal/types/xroute.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "google.golang.org/protobuf/proto" + + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" +) + +type XRouteData interface { + proto.Message + XRouteWithRefs +} + +type XRouteWithRefs interface { + GetParentRefs() []*pbmesh.ParentReference + GetUnderlyingBackendRefs() []*pbmesh.BackendReference +} diff --git a/proto-public/pbmesh/v1alpha1/xroute_addons.go b/proto-public/pbmesh/v1alpha1/xroute_addons.go new file mode 100644 index 000000000000..e85177d5160e --- /dev/null +++ b/proto-public/pbmesh/v1alpha1/xroute_addons.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package meshv1alpha1 + +// GetUnderlyingBackendRefs will collect BackendReferences from all rules and +// bundle them up in one slice, unwrapping the HTTP-specifics in the process. +// +// This implements an XRouteWithRefs interface in the internal/mesh package. +// +// NOTE: no deduplication occurs. 
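+// Callers that need a unique set of references must deduplicate the
+// result themselves.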
+func (x *HTTPRoute) GetUnderlyingBackendRefs() []*BackendReference { + if x == nil { + return nil + } + + estimate := 0 + for _, rule := range x.Rules { + estimate += len(rule.BackendRefs) + } + + backendRefs := make([]*BackendReference, 0, estimate) + for _, rule := range x.Rules { + for _, backendRef := range rule.BackendRefs { + backendRefs = append(backendRefs, backendRef.BackendRef) + } + } + return backendRefs +} + +// GetUnderlyingBackendRefs will collect BackendReferences from all rules and +// bundle them up in one slice, unwrapping the GRPC-specifics in the process. +// +// This implements an XRouteWithRefs interface in the internal/mesh package. +// +// NOTE: no deduplication occurs. +func (x *GRPCRoute) GetUnderlyingBackendRefs() []*BackendReference { + if x == nil { + return nil + } + + estimate := 0 + for _, rule := range x.Rules { + estimate += len(rule.BackendRefs) + } + + backendRefs := make([]*BackendReference, 0, estimate) + for _, rule := range x.Rules { + for _, backendRef := range rule.BackendRefs { + backendRefs = append(backendRefs, backendRef.BackendRef) + } + } + return backendRefs +} + +// GetUnderlyingBackendRefs will collect BackendReferences from all rules and +// bundle them up in one slice, unwrapping the TCP-specifics in the process. +// +// This implements an XRouteWithRefs interface in the internal/mesh package. +// +// NOTE: no deduplication occurs. +func (x *TCPRoute) GetUnderlyingBackendRefs() []*BackendReference { + if x == nil { + return nil + } + + estimate := 0 + for _, rule := range x.Rules { + estimate += len(rule.BackendRefs) + } + + backendRefs := make([]*BackendReference, 0, estimate) + + for _, rule := range x.Rules { + for _, backendRef := range rule.BackendRefs { + backendRefs = append(backendRefs, backendRef.BackendRef) + } + } + return backendRefs +} + +// IsHashBased returns true if the policy is a hash-based policy such as maglev +// or ring hash. +func (p LoadBalancerPolicy) IsHashBased() bool { + switch p { + case LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH: + return true + } + return false +} diff --git a/proto-public/pbmesh/v1alpha1/xroute_addons_test.go b/proto-public/pbmesh/v1alpha1/xroute_addons_test.go new file mode 100644 index 000000000000..ce15282de825 --- /dev/null +++ b/proto-public/pbmesh/v1alpha1/xroute_addons_test.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package meshv1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + + pbresource "github.com/hashicorp/consul/proto-public/pbresource" +) + +type routeWithAddons interface { + proto.Message + GetUnderlyingBackendRefs() []*BackendReference +} + +func TestXRoute_GetUnderlyingBackendRefs(t *testing.T) { + type testcase struct { + route routeWithAddons + expect []*BackendReference + } + + run := func(t *testing.T, tc testcase) { + got := tc.route.GetUnderlyingBackendRefs() + require.ElementsMatch(t, stringifyList(tc.expect), stringifyList(got)) + } + + cases := map[string]testcase{ + "http: nil": { + route: (*HTTPRoute)(nil), + }, + "grpc: nil": { + route: (*GRPCRoute)(nil), + }, + "tcp: nil": { + route: (*TCPRoute)(nil), + }, + "http: kitchen sink": { + route: &HTTPRoute{ + Rules: []*HTTPRouteRule{ + {BackendRefs: []*HTTPBackendRef{ + {BackendRef: newBackendRef("aa")}, + }}, + {BackendRefs: []*HTTPBackendRef{ + {BackendRef: newBackendRef("bb")}, + }}, + {BackendRefs: []*HTTPBackendRef{ + {BackendRef: newBackendRef("cc")}, + {BackendRef: newBackendRef("dd")}, + }}, + {BackendRefs: []*HTTPBackendRef{ + {BackendRef: newBackendRef("ee")}, + {BackendRef: newBackendRef("ff")}, + }}, + }, + }, + expect: []*BackendReference{ + newBackendRef("aa"), + newBackendRef("bb"), + newBackendRef("cc"), + newBackendRef("dd"), + newBackendRef("ee"), + newBackendRef("ff"), + }, + }, + "grpc: kitchen sink": { + route: &GRPCRoute{ + Rules: []*GRPCRouteRule{ + {BackendRefs: []*GRPCBackendRef{ + {BackendRef: newBackendRef("aa")}, + }}, + {BackendRefs: []*GRPCBackendRef{ + {BackendRef: newBackendRef("bb")}, + }}, + {BackendRefs: []*GRPCBackendRef{ + {BackendRef: newBackendRef("cc")}, + {BackendRef: newBackendRef("dd")}, + }}, + {BackendRefs: []*GRPCBackendRef{ + {BackendRef: newBackendRef("ee")}, + {BackendRef: newBackendRef("ff")}, + }}, + }, + }, + expect: []*BackendReference{ + newBackendRef("aa"), + newBackendRef("bb"), + newBackendRef("cc"), + newBackendRef("dd"), + newBackendRef("ee"), + newBackendRef("ff"), + }, + }, + "tcp: kitchen sink": { + route: &TCPRoute{ + Rules: []*TCPRouteRule{ + {BackendRefs: []*TCPBackendRef{ + {BackendRef: newBackendRef("aa")}, + }}, + {BackendRefs: []*TCPBackendRef{ + {BackendRef: newBackendRef("bb")}, + }}, + {BackendRefs: []*TCPBackendRef{ + {BackendRef: newBackendRef("cc")}, + {BackendRef: newBackendRef("dd")}, + }}, + {BackendRefs: []*TCPBackendRef{ + {BackendRef: newBackendRef("ee")}, + {BackendRef: newBackendRef("ff")}, + }}, + }, + }, + expect: []*BackendReference{ + newBackendRef("aa"), + newBackendRef("bb"), + newBackendRef("cc"), + newBackendRef("dd"), + newBackendRef("ee"), + newBackendRef("ff"), + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func protoToString[V proto.Message](pb V) string { + m := protojson.MarshalOptions{ + Indent: " ", + } + gotJSON, err := m.Marshal(pb) + if err != nil { + return "" + } + return string(gotJSON) +} + +func newRouteRef(name string) *pbresource.Reference { + return &pbresource.Reference{ + Type: &pbresource.Type{ + Group: "fake", + GroupVersion: "v1alpha1", + Kind: "fake", + }, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + Name: name, + } +} + +func newBackendRef(name string) *BackendReference { + return &BackendReference{ + Ref: newRouteRef(name), + } +} + +func 
stringifyList[V proto.Message](list []V) []string { + out := make([]string, 0, len(list)) + for _, item := range list { + out = append(out, protoToString(item)) + } + return out +} From 55f902744d09ce5000622be8d7198f4fb9a34f23 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 21 Aug 2023 14:20:47 -0500 Subject: [PATCH 03/16] use consistent casing --- internal/mesh/internal/types/destination_policy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/mesh/internal/types/destination_policy.go b/internal/mesh/internal/types/destination_policy.go index d9f076add2dd..ca41a7fbc438 100644 --- a/internal/mesh/internal/types/destination_policy.go +++ b/internal/mesh/internal/types/destination_policy.go @@ -88,7 +88,7 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { if _, ok := lb.Config.(*pbmesh.LoadBalancer_RingHashConfig); ok { merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ Name: "config", - Wrapped: fmt.Errorf("RingHashConfig specified for incompatible load balancing policy %q", lb.Policy), + Wrapped: fmt.Errorf("ring_hash_config specified for incompatible load balancing policy %q", lb.Policy), })) } } @@ -97,7 +97,7 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { if _, ok := lb.Config.(*pbmesh.LoadBalancer_LeastRequestConfig); ok { merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ Name: "config", - Wrapped: fmt.Errorf("LeastRequestConfig specified for incompatible load balancing policy %q", lb.Policy), + Wrapped: fmt.Errorf("least_request_config specified for incompatible load balancing policy %q", lb.Policy), })) } } From 400c5f6a1382a78ec9814c3e607b41f62a63f77c Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 21 Aug 2023 14:22:28 -0500 Subject: [PATCH 04/16] unbreak the tests --- internal/mesh/internal/types/destination_policy_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/mesh/internal/types/destination_policy_test.go b/internal/mesh/internal/types/destination_policy_test.go index 6654ae2aa7fb..0d510f876f38 100644 --- a/internal/mesh/internal/types/destination_policy_test.go +++ b/internal/mesh/internal/types/destination_policy_test.go @@ -122,7 +122,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: LeastRequestConfig specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_RING_HASH"`, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: least_request_config specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_RING_HASH"`, }, "lbpolicy: bad for ring hash config": { policy: &pbmesh.DestinationPolicy{ @@ -140,7 +140,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: RingHashConfig specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_LEAST_REQUEST"`, + expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: ring_hash_config specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_LEAST_REQUEST"`, }, "lbpolicy: good for least request config": { policy: &pbmesh.DestinationPolicy{ From 
ea14313f7029e3af8dbacdbe256c8d9240f90d15 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 21 Aug 2023 14:34:56 -0500 Subject: [PATCH 05/16] add http route mutation tests --- .../mesh/internal/types/http_route_test.go | 126 +++++++++++++++++- 1 file changed, 125 insertions(+), 1 deletion(-) diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index ec512d41474f..b8f344cbda66 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/resource/resourcetest" @@ -16,7 +17,130 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) -// TODO(rb): add mutation tests +func TestMutateHTTPRoute(t *testing.T) { + type testcase struct { + route *pbmesh.HTTPRoute + expect *pbmesh.HTTPRoute + expectErr string + } + + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(HTTPRouteType, "api"). + WithData(t, tc.route). + Build() + + err := MutateHTTPRoute(res) + + got := resourcetest.MustDecode[pbmesh.HTTPRoute, *pbmesh.HTTPRoute](t, res) + + if tc.expectErr == "" { + require.NoError(t, err) + + if tc.expect == nil { + tc.expect = proto.Clone(tc.route).(*pbmesh.HTTPRoute) + } + + prototest.AssertDeepEqual(t, tc.expect, got.Data) + } else { + testutil.RequireErrorContains(t, err, tc.expectErr) + } + } + + cases := map[string]testcase{ + "no-rules": { + route: &pbmesh.HTTPRoute{}, + }, + "rules-with-no-matches": { + route: &pbmesh.HTTPRoute{ + Rules: []*pbmesh.HTTPRouteRule{{ + // none + }}, + }, + }, + "rules-with-matches-no-methods": { + route: &pbmesh.HTTPRoute{ + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/foo", + }, + }}, + }}, + }, + }, + "rules-with-matches-methods-uppercase": { + route: &pbmesh.HTTPRoute{ + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/foo", + }, + Method: "GET", + }, + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/bar", + }, + Method: "POST", + }, + }, + }}, + }, + }, + "rules-with-matches-methods-lowercase": { + route: &pbmesh.HTTPRoute{ + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/foo", + }, + Method: "get", + }, + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/bar", + }, + Method: "post", + }, + }, + }}, + }, + expect: &pbmesh.HTTPRoute{ + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/foo", + }, + Method: "GET", + }, + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/bar", + }, + Method: "POST", + }, + }, + }}, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} func TestValidateHTTPRoute(t *testing.T) { type testcase struct { From ae032e578aaad85dce4b0fa4f986206a26fd2a48 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Mon, 21 Aug 2023 15:06:43 -0500 Subject: [PATCH 06/16] add tests --- internal/mesh/internal/types/http_route.go | 2 - .../mesh/internal/types/http_route_test.go | 256 +++++++++++++++++- 2 files changed, 255 insertions(+), 3 deletions(-) diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 1995b7e14dc2..73759b571fc7 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -197,8 +197,6 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { } } - // This is just a simple placeholder validation stolen from the - // config entry version. if match.Method != "" && !isValidHTTPMethod(match.Method) { merr = multierror.Append(merr, wrapMatchErr( resource.ErrInvalidField{ diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index b8f344cbda66..5f4bb09ad724 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -153,7 +153,10 @@ func TestValidateHTTPRoute(t *testing.T) { WithData(t, tc.route). Build() - err := ValidateHTTPRoute(res) + err := MutateHTTPRoute(res) + require.NoError(t, err) + + err = ValidateHTTPRoute(res) // Verify that validate didn't actually change the object. got := resourcetest.MustDecode[pbmesh.HTTPRoute, *pbmesh.HTTPRoute](t, res) @@ -176,6 +179,248 @@ func TestValidateHTTPRoute(t *testing.T) { }, expectErr: `invalid "hostnames" field: should not populate hostnames`, }, + "no rules": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + }, + }, + "rules with no matches": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "rules with matches that are empty": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + // none + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "path match with no type is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Value: "/foo", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "path" field: invalid "type" field: missing required field`, + }, + "exact path match with no leading slash is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "foo", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": 
invalid "path" field: invalid "value" field: exact patch value does not start with '/': "foo"`, + }, + "prefix path match with no leading slash is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "foo", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "path" field: invalid "value" field: prefix patch value does not start with '/': "foo"`, + }, + "exact path match with leading slash is good": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "/foo", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "prefix path match with leading slash is good": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "/foo", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "header match with no type is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Headers: []*pbmesh.HTTPHeaderMatch{{ + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "headers": invalid "type" field: missing required field`, + }, + "header match with no name is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Headers: []*pbmesh.HTTPHeaderMatch{{ + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT, + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "headers": invalid "name" field: missing required field`, + }, + "header match is good": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Headers: []*pbmesh.HTTPHeaderMatch{{ + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT, + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "queryparam match with no type is 
bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + QueryParams: []*pbmesh.HTTPQueryParamMatch{{ + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "query_params": invalid "type" field: missing required field`, + }, + "queryparam match with no name is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + QueryParams: []*pbmesh.HTTPQueryParamMatch{{ + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT, + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "query_params": invalid "name" field: missing required field`, + }, + "queryparam match is good": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + QueryParams: []*pbmesh.HTTPQueryParamMatch{{ + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT, + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + + // queryparam match with no type is bad + // queryparam match with no name is bad + + // method match with bad name is bad + + // TODO: filters + } // TODO(rb): add rest of tests for the matching logic @@ -338,6 +583,15 @@ func getXRouteBackendRefTestCases() map[string]xRouteBackendRefTestcase { }, expectErr: `invalid element at index 0 of list "rules": invalid element at index 1 of list "backend_refs": invalid "backend_ref" field: invalid "datacenter" field: datacenter is not yet supported on backend refs`, }, + "good backend ref": { + refs: []*pbmesh.BackendReference{ + newBackendRef(catalog.ServiceType, "api", ""), + { + Ref: newRef(catalog.ServiceType, "db"), + Port: "http", + }, + }, + }, } } From b0ab87896c950444bfcc3f60f84b77d5435010bc Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Mon, 21 Aug 2023 15:23:58 -0500 Subject: [PATCH 07/16] more tests --- internal/mesh/internal/types/http_route.go | 9 +- .../mesh/internal/types/http_route_test.go | 311 +++++++++++++++++- 2 files changed, 310 insertions(+), 10 deletions(-) diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 73759b571fc7..3422939011ca 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -332,7 +332,7 @@ func validateHTTPTimeouts(timeouts *pbmesh.HTTPRouteTimeouts) []error { val := timeouts.BackendRequest.AsDuration() if val < 0 { errs = append(errs, resource.ErrInvalidField{ - Name: "request", + Name: "backend_request", Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), }) } @@ -357,6 +357,13 @@ func validateHTTPRetries(retries *pbmesh.HTTPRouteRetries) []error { var errs []error + if retries.Number < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "number", + Wrapped: fmt.Errorf("cannot be negative: %v", retries.Number), + }) + } + for i, condition := range retries.OnConditions { if !isValidRetryCondition(condition) { errs = append(errs, resource.ErrInvalidListElement{ diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index 5f4bb09ad724..057fde01c5f1 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -5,9 +5,11 @@ package types import ( "testing" + "time" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/resource/resourcetest" @@ -413,19 +415,310 @@ func TestValidateHTTPRoute(t *testing.T) { }}, }, }, + "method match is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Method: "BOB", + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "method" field: not a valid http method: "BOB"`, + }, + "method match is good": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Method: "DELETE", + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter empty is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + // none + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter req header mod is ok": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + RequestHeaderModifier: 
&pbmesh.HTTPHeaderFilter{}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter resp header mod is ok": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter rewrite header mod missing path prefix": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": invalid "url_rewrite" field: invalid "path_prefix" field: missing required field`, + }, + "filter rewrite header mod is ok": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter req+resp header mod is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter req+rewrite header mod is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter resp+rewrite header mod is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of 
list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter req+resp+rewrite header mod is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Filters: []*pbmesh.HTTPRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "timeout: bad request": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: durationpb.New(-1 * time.Second), + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "request" field: timeout cannot be negative: -1s`, + }, + "timeout: bad backend request": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Timeouts: &pbmesh.HTTPRouteTimeouts{ + BackendRequest: durationpb.New(-1 * time.Second), + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "backend_request" field: timeout cannot be negative: -1s`, + }, + "timeout: bad idle": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Idle: durationpb.New(-1 * time.Second), + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "idle" field: timeout cannot be negative: -1s`, + }, + "timeout: good all": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: durationpb.New(1 * time.Second), + BackendRequest: durationpb.New(2 * time.Second), + Idle: durationpb.New(3 * time.Second), + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "retries: bad number": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Retries: &pbmesh.HTTPRouteRetries{ + Number: -5, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "retries" field: invalid "number" field: cannot be negative: -5`, + }, + "retries: bad conditions": { + route: &pbmesh.HTTPRoute{ + ParentRefs: 
[]*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Retries: &pbmesh.HTTPRouteRetries{ + OnConditions: []string{"garbage"}, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "retries" field: invalid element at index 0 of list "on_conditions": not a valid retry condition: "garbage"`, + }, + "retries: good all": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Retries: &pbmesh.HTTPRouteRetries{ + Number: 5, + OnConditions: []string{"internal"}, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, - // queryparam match with no type is bad - // queryparam match with no name is bad - - // method match with bad name is bad - - // TODO: filters + // retries: bad on_conditions + // retries: good on_conditions } - // TODO(rb): add rest of tests for the matching logic - // TODO(rb): add rest of tests for the retry and timeout logic - // Add common parent refs test cases. for name, parentTC := range getXRouteParentRefTestCases() { cases["parent-ref: "+name] = testcase{ From a37a7ea03da178800de755ef12bc4001943dba03 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 21 Aug 2023 15:24:33 -0500 Subject: [PATCH 08/16] remove todo --- internal/mesh/internal/types/http_route.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 3422939011ca..7834c7169e10 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -98,8 +98,6 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { } } - // TODO(rb): port a bunch of validation from ServiceRouterConfigEntry.Validate - for j, match := range rule.Matches { wrapMatchErr := func(err error) error { return wrapRuleErr(resource.ErrInvalidListElement{ From 577faee5f0c0e38e1ad81ab7632cbf6a8d4ac264 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Mon, 21 Aug 2023 15:29:19 -0500 Subject: [PATCH 09/16] finish coverage --- .../mesh/internal/types/http_route_test.go | 38 +++++++++++++++++-- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index 057fde01c5f1..3d7027f67947 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -713,10 +713,33 @@ func TestValidateHTTPRoute(t *testing.T) { }}, }, }, - - // retries: bad on_conditions - // retries: good on_conditions - + "backend ref with filters is unsupported": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + Filters: []*pbmesh.HTTPRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "filters" field: filters are not supported at this level yet`, + }, + "nil backend ref": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + BackendRefs: []*pbmesh.HTTPBackendRef{nil}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "backend_ref" field: missing required field`, + }, } // Add common parent refs test cases. @@ -814,6 +837,13 @@ func getXRouteParentRefTestCases() map[string]xRouteParentRefTestcase { }, expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: parent ref "catalog.v1alpha1.Service/default.local.default/api" for ports [http] covered by wildcard port already`, }, + "duplicate parents via exact+wild overlap (reversed)": { + refs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "api", ""), + newParentRef(catalog.ServiceType, "api", "http"), + }, + expectErr: `invalid element at index 1 of list "parent_refs": invalid "ref" field: parent ref "catalog.v1alpha1.Service/default.local.default/api" for port "http" covered by wildcard port already`, + }, "good single parent ref": { refs: []*pbmesh.ParentReference{ newParentRef(catalog.ServiceType, "api", "http"), From 57a4a2bc927fcca5a12e419d2fd197b07862baf6 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Mon, 21 Aug 2023 15:54:10 -0500 Subject: [PATCH 10/16] add more grpc route tests --- internal/mesh/internal/types/grpc_route.go | 36 +- .../mesh/internal/types/grpc_route_test.go | 397 +++++++++++++++++- internal/mesh/internal/types/http_route.go | 2 +- .../mesh/internal/types/http_route_test.go | 190 ++++----- 4 files changed, 509 insertions(+), 116 deletions(-) diff --git a/internal/mesh/internal/types/grpc_route.go b/internal/mesh/internal/types/grpc_route.go index eb12fd733ba4..152028827694 100644 --- a/internal/mesh/internal/types/grpc_route.go +++ b/internal/mesh/internal/types/grpc_route.go @@ -39,8 +39,6 @@ func RegisterGRPCRoute(r resource.Registry) { func ValidateGRPCRoute(res *pbresource.Resource) error { var route pbmesh.GRPCRoute - // TODO(rb):sync common stuff from HTTPRoute - if err := res.Data.UnmarshalTo(&route); err != nil { return resource.NewErrDataParse(&route, err) } @@ -66,8 +64,6 @@ func ValidateGRPCRoute(res *pbresource.Resource) error { } } - // TODO(rb): port a bunch of validation from ServiceRouterConfigEntry.Validate - for j, match := range rule.Matches { wrapMatchErr := func(err error) error { return wrapRuleErr(resource.ErrInvalidListElement{ @@ -77,13 +73,23 @@ func ValidateGRPCRoute(res *pbresource.Resource) error { }) } - if match.Method != nil && match.Method.Type == pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED { - merr = multierror.Append(merr, wrapMatchErr( - resource.ErrInvalidField{ - Name: "method", - Wrapped: resource.ErrMissing, - }, - )) + if match.Method != nil { + if match.Method.Type == pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED { + merr = multierror.Append(merr, wrapMatchErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: resource.ErrMissing, + }, + )) + } + if match.Method.Service == "" && match.Method.Method == "" { + merr = multierror.Append(merr, wrapMatchErr( + resource.ErrInvalidField{ + Name: "service", + Wrapped: errors.New("at least one of \"service\" or \"method\" must be set"), + }, + )) + } } for k, header := range match.Headers { @@ -103,6 +109,14 @@ func ValidateGRPCRoute(res *pbresource.Resource) error { }, )) } + if header.Name == "" { + merr = multierror.Append(merr, wrapMatchHeaderErr( + resource.ErrInvalidField{ + Name: "name", + Wrapped: resource.ErrMissing, + }), + ) + } } } diff --git a/internal/mesh/internal/types/grpc_route_test.go b/internal/mesh/internal/types/grpc_route_test.go index 81f795ad60cf..e5499864b77e 100644 --- a/internal/mesh/internal/types/grpc_route_test.go +++ b/internal/mesh/internal/types/grpc_route_test.go @@ -49,10 +49,403 @@ func TestValidateGRPCRoute(t *testing.T) { }, expectErr: `invalid "hostnames" field: should not populate hostnames`, }, + "no rules": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + }, + }, + "rules with no matches": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "rules with matches that are empty": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + // none + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + 
}}, + }}, + }, + }, + "method match with no type is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Method: &pbmesh.GRPCMethodMatch{ + Service: "foo", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "type" field: missing required field`, + }, + "method match with no service nor method is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "service" field: at least one of "service" or "method" must be set`, + }, + "method match is good (1)": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "foo", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "method match is good (2)": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Method: "bar", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "method match is good (3)": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "foo", + Method: "bar", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "header match with no type is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Headers: []*pbmesh.GRPCHeaderMatch{{ + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "headers": invalid "type" field: missing required field`, + }, + "header match with no name is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Headers: 
[]*pbmesh.GRPCHeaderMatch{{ + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT, + }}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "headers": invalid "name" field: missing required field`, + }, + "header match is good": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Headers: []*pbmesh.GRPCHeaderMatch{{ + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT, + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter empty is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + // none + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter req header mod is ok": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter resp header mod is ok": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter rewrite header mod missing path prefix": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": invalid "url_rewrite" field: invalid "path_prefix" field: field should not be empty if enclosing section is set`, + }, + "filter rewrite header mod is ok": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + }, + "filter req+resp header mod is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + 
RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter req+rewrite header mod is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter resp+rewrite header mod is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "filter req+resp+rewrite header mod is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Filters: []*pbmesh.GRPCRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/blah", + }, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, + }, + "backend ref with filters is unsupported": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + Filters: []*pbmesh.GRPCRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "filters" field: filters are not supported at this level yet`, + }, + "nil backend ref": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + BackendRefs: []*pbmesh.GRPCBackendRef{nil}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "backend_ref" field: missing required field`, + }, + } + + 
// Add common timeouts test cases. + for name, timeoutsTC := range getXRouteTimeoutsTestCases() { + cases["timeouts: "+name] = testcase{ + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Timeouts: timeoutsTC.timeouts, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: timeoutsTC.expectErr, + } } - // TODO(rb): add rest of tests for the matching logic - // TODO(rb): add rest of tests for the retry and timeout logic + // Add common retries test cases. + for name, retriesTC := range getXRouteRetriesTestCases() { + cases["retries: "+name] = testcase{ + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Retries: retriesTC.retries, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: retriesTC.expectErr, + } + } // Add common parent refs test cases. for name, parentTC := range getXRouteParentRefTestCases() { diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 7834c7169e10..4ac846bc8161 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -228,7 +228,7 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { Name: "url_rewrite", Wrapped: resource.ErrInvalidField{ Name: "path_prefix", - Wrapped: resource.ErrMissing, + Wrapped: errors.New("field should not be empty if enclosing section is set"), }, }, )) diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index 3d7027f67947..d94c7a36336d 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -506,7 +506,7 @@ func TestValidateHTTPRoute(t *testing.T) { }}, }}, }, - expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": invalid "url_rewrite" field: invalid "path_prefix" field: missing required field`, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": invalid "url_rewrite" field: invalid "path_prefix" field: field should not be empty if enclosing section is set`, }, "filter rewrite header mod is ok": { route: &pbmesh.HTTPRoute{ @@ -600,146 +600,69 @@ func TestValidateHTTPRoute(t *testing.T) { }, expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "filters": exactly one of request_header_modifier, response_header_modifier, or url_rewrite`, }, - "timeout: bad request": { - route: &pbmesh.HTTPRoute{ - ParentRefs: []*pbmesh.ParentReference{ - newParentRef(catalog.ServiceType, "web", ""), - }, - Rules: []*pbmesh.HTTPRouteRule{{ - Timeouts: &pbmesh.HTTPRouteTimeouts{ - Request: durationpb.New(-1 * time.Second), - }, - BackendRefs: []*pbmesh.HTTPBackendRef{{ - BackendRef: newBackendRef(catalog.ServiceType, "api", ""), - }}, - }}, - }, - expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "request" field: timeout cannot be negative: -1s`, - }, - "timeout: bad backend request": { - route: &pbmesh.HTTPRoute{ - ParentRefs: []*pbmesh.ParentReference{ - newParentRef(catalog.ServiceType, "web", ""), - }, - Rules: []*pbmesh.HTTPRouteRule{{ - Timeouts: &pbmesh.HTTPRouteTimeouts{ - 
BackendRequest: durationpb.New(-1 * time.Second), - }, - BackendRefs: []*pbmesh.HTTPBackendRef{{ - BackendRef: newBackendRef(catalog.ServiceType, "api", ""), - }}, - }}, - }, - expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "backend_request" field: timeout cannot be negative: -1s`, - }, - "timeout: bad idle": { - route: &pbmesh.HTTPRoute{ - ParentRefs: []*pbmesh.ParentReference{ - newParentRef(catalog.ServiceType, "web", ""), - }, - Rules: []*pbmesh.HTTPRouteRule{{ - Timeouts: &pbmesh.HTTPRouteTimeouts{ - Idle: durationpb.New(-1 * time.Second), - }, - BackendRefs: []*pbmesh.HTTPBackendRef{{ - BackendRef: newBackendRef(catalog.ServiceType, "api", ""), - }}, - }}, - }, - expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "idle" field: timeout cannot be negative: -1s`, - }, - "timeout: good all": { - route: &pbmesh.HTTPRoute{ - ParentRefs: []*pbmesh.ParentReference{ - newParentRef(catalog.ServiceType, "web", ""), - }, - Rules: []*pbmesh.HTTPRouteRule{{ - Timeouts: &pbmesh.HTTPRouteTimeouts{ - Request: durationpb.New(1 * time.Second), - BackendRequest: durationpb.New(2 * time.Second), - Idle: durationpb.New(3 * time.Second), - }, - BackendRefs: []*pbmesh.HTTPBackendRef{{ - BackendRef: newBackendRef(catalog.ServiceType, "api", ""), - }}, - }}, - }, - }, - "retries: bad number": { + "backend ref with filters is unsupported": { route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ newParentRef(catalog.ServiceType, "web", ""), }, Rules: []*pbmesh.HTTPRouteRule{{ - Retries: &pbmesh.HTTPRouteRetries{ - Number: -5, - }, BackendRefs: []*pbmesh.HTTPBackendRef{{ BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + Filters: []*pbmesh.HTTPRouteFilter{{ + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }}, }}, }}, }, - expectErr: `invalid element at index 0 of list "rules": invalid "retries" field: invalid "number" field: cannot be negative: -5`, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "filters" field: filters are not supported at this level yet`, }, - "retries: bad conditions": { + "nil backend ref": { route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ newParentRef(catalog.ServiceType, "web", ""), }, Rules: []*pbmesh.HTTPRouteRule{{ - Retries: &pbmesh.HTTPRouteRetries{ - OnConditions: []string{"garbage"}, - }, - BackendRefs: []*pbmesh.HTTPBackendRef{{ - BackendRef: newBackendRef(catalog.ServiceType, "api", ""), - }}, + BackendRefs: []*pbmesh.HTTPBackendRef{nil}, }}, }, - expectErr: `invalid element at index 0 of list "rules": invalid "retries" field: invalid element at index 0 of list "on_conditions": not a valid retry condition: "garbage"`, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "backend_ref" field: missing required field`, }, - "retries: good all": { + } + + // Add common timeouts test cases. 
+ for name, timeoutsTC := range getXRouteTimeoutsTestCases() { + cases["timeouts: "+name] = testcase{ route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ newParentRef(catalog.ServiceType, "web", ""), }, Rules: []*pbmesh.HTTPRouteRule{{ - Retries: &pbmesh.HTTPRouteRetries{ - Number: 5, - OnConditions: []string{"internal"}, - }, + Timeouts: timeoutsTC.timeouts, BackendRefs: []*pbmesh.HTTPBackendRef{{ BackendRef: newBackendRef(catalog.ServiceType, "api", ""), }}, }}, }, - }, - "backend ref with filters is unsupported": { + expectErr: timeoutsTC.expectErr, + } + } + + // Add common retries test cases. + for name, retriesTC := range getXRouteRetriesTestCases() { + cases["retries: "+name] = testcase{ route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ newParentRef(catalog.ServiceType, "web", ""), }, Rules: []*pbmesh.HTTPRouteRule{{ + Retries: retriesTC.retries, BackendRefs: []*pbmesh.HTTPBackendRef{{ BackendRef: newBackendRef(catalog.ServiceType, "api", ""), - Filters: []*pbmesh.HTTPRouteFilter{{ - RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, - }}, }}, }}, }, - expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "filters" field: filters are not supported at this level yet`, - }, - "nil backend ref": { - route: &pbmesh.HTTPRoute{ - ParentRefs: []*pbmesh.ParentReference{ - newParentRef(catalog.ServiceType, "web", ""), - }, - Rules: []*pbmesh.HTTPRouteRule{{ - BackendRefs: []*pbmesh.HTTPBackendRef{nil}, - }}, - }, - expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "backend_refs": invalid "backend_ref" field: missing required field`, - }, + expectErr: retriesTC.expectErr, + } } // Add common parent refs test cases. @@ -918,6 +841,69 @@ func getXRouteBackendRefTestCases() map[string]xRouteBackendRefTestcase { } } +type xRouteTimeoutsTestcase struct { + timeouts *pbmesh.HTTPRouteTimeouts + expectErr string +} + +func getXRouteTimeoutsTestCases() map[string]xRouteTimeoutsTestcase { + return map[string]xRouteTimeoutsTestcase{ + "bad request": { + timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: durationpb.New(-1 * time.Second), + }, + expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "request" field: timeout cannot be negative: -1s`, + }, + "bad backend request": { + timeouts: &pbmesh.HTTPRouteTimeouts{ + BackendRequest: durationpb.New(-1 * time.Second), + }, + expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "backend_request" field: timeout cannot be negative: -1s`, + }, + "bad idle": { + timeouts: &pbmesh.HTTPRouteTimeouts{ + Idle: durationpb.New(-1 * time.Second), + }, + expectErr: `invalid element at index 0 of list "rules": invalid "timeouts" field: invalid "idle" field: timeout cannot be negative: -1s`, + }, + "good all": { + timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: durationpb.New(1 * time.Second), + BackendRequest: durationpb.New(2 * time.Second), + Idle: durationpb.New(3 * time.Second), + }, + }, + } +} + +type xRouteRetriesTestcase struct { + retries *pbmesh.HTTPRouteRetries + expectErr string +} + +func getXRouteRetriesTestCases() map[string]xRouteRetriesTestcase { + return map[string]xRouteRetriesTestcase{ + "bad number": { + retries: &pbmesh.HTTPRouteRetries{ + Number: -5, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "retries" field: invalid "number" field: cannot be negative: -5`, + }, + "bad conditions": { + retries: &pbmesh.HTTPRouteRetries{ + 
OnConditions: []string{"garbage"}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid "retries" field: invalid element at index 0 of list "on_conditions": not a valid retry condition: "garbage"`, + }, + "good all": { + retries: &pbmesh.HTTPRouteRetries{ + Number: 5, + OnConditions: []string{"internal"}, + }, + }, + } +} + func newRef(typ *pbresource.Type, name string) *pbresource.Reference { return resourcetest.Resource(typ, name).Reference("") } From be5fbf7118cfcb71b6172d08ddad4b5b7a1c9cbb Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 21 Aug 2023 16:36:55 -0500 Subject: [PATCH 11/16] add another case --- .../mesh/internal/types/computed_routes_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/internal/mesh/internal/types/computed_routes_test.go b/internal/mesh/internal/types/computed_routes_test.go index 0f5dacc102e8..49a2bf508dac 100644 --- a/internal/mesh/internal/types/computed_routes_test.go +++ b/internal/mesh/internal/types/computed_routes_test.go @@ -43,6 +43,19 @@ func TestValidateComputedRoutes(t *testing.T) { routes: &pbmesh.ComputedRoutes{}, expectErr: `invalid "ported_configs" field: cannot be empty`, }, + "empty config": { + routes: &pbmesh.ComputedRoutes{ + PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{ + "http": { + Config: nil, + Targets: map[string]*pbmesh.BackendTargetDetails{ + "foo": {}, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within ported_configs: invalid "config" field: cannot be empty`, + }, "empty targets": { routes: &pbmesh.ComputedRoutes{ PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{ From eb59ce73cb6f8caa495a4e040f0e92a69d26f503 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 21 Aug 2023 16:43:29 -0500 Subject: [PATCH 12/16] update signatures --- internal/mesh/internal/types/computed_routes_test.go | 2 +- internal/mesh/internal/types/destination_policy_test.go | 2 +- internal/mesh/internal/types/grpc_route_test.go | 2 +- internal/mesh/internal/types/http_route_test.go | 4 ++-- internal/mesh/internal/types/tcp_route_test.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/mesh/internal/types/computed_routes_test.go b/internal/mesh/internal/types/computed_routes_test.go index 49a2bf508dac..46b2f890a71e 100644 --- a/internal/mesh/internal/types/computed_routes_test.go +++ b/internal/mesh/internal/types/computed_routes_test.go @@ -28,7 +28,7 @@ func TestValidateComputedRoutes(t *testing.T) { err := ValidateComputedRoutes(res) // Verify that validate didn't actually change the object. - got := resourcetest.MustDecode[pbmesh.ComputedRoutes, *pbmesh.ComputedRoutes](t, res) + got := resourcetest.MustDecode[*pbmesh.ComputedRoutes](t, res) prototest.AssertDeepEqual(t, tc.routes, got.Data) if tc.expectErr == "" { diff --git a/internal/mesh/internal/types/destination_policy_test.go b/internal/mesh/internal/types/destination_policy_test.go index 0d510f876f38..4276c450fdad 100644 --- a/internal/mesh/internal/types/destination_policy_test.go +++ b/internal/mesh/internal/types/destination_policy_test.go @@ -31,7 +31,7 @@ func TestValidateDestinationPolicy(t *testing.T) { err := ValidateDestinationPolicy(res) // Verify that validate didn't actually change the object. 
- got := resourcetest.MustDecode[pbmesh.DestinationPolicy, *pbmesh.DestinationPolicy](t, res) + got := resourcetest.MustDecode[*pbmesh.DestinationPolicy](t, res) prototest.AssertDeepEqual(t, tc.policy, got.Data) if tc.expectErr != "" && len(tc.expectErrs) > 0 { diff --git a/internal/mesh/internal/types/grpc_route_test.go b/internal/mesh/internal/types/grpc_route_test.go index e5499864b77e..3a7f1f385122 100644 --- a/internal/mesh/internal/types/grpc_route_test.go +++ b/internal/mesh/internal/types/grpc_route_test.go @@ -29,7 +29,7 @@ func TestValidateGRPCRoute(t *testing.T) { err := ValidateGRPCRoute(res) // Verify that validate didn't actually change the object. - got := resourcetest.MustDecode[pbmesh.GRPCRoute, *pbmesh.GRPCRoute](t, res) + got := resourcetest.MustDecode[*pbmesh.GRPCRoute](t, res) prototest.AssertDeepEqual(t, tc.route, got.Data) if tc.expectErr == "" { diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index d94c7a36336d..9bfa4bce9392 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -33,7 +33,7 @@ func TestMutateHTTPRoute(t *testing.T) { err := MutateHTTPRoute(res) - got := resourcetest.MustDecode[pbmesh.HTTPRoute, *pbmesh.HTTPRoute](t, res) + got := resourcetest.MustDecode[*pbmesh.HTTPRoute](t, res) if tc.expectErr == "" { require.NoError(t, err) @@ -161,7 +161,7 @@ func TestValidateHTTPRoute(t *testing.T) { err = ValidateHTTPRoute(res) // Verify that validate didn't actually change the object. - got := resourcetest.MustDecode[pbmesh.HTTPRoute, *pbmesh.HTTPRoute](t, res) + got := resourcetest.MustDecode[*pbmesh.HTTPRoute](t, res) prototest.AssertDeepEqual(t, tc.route, got.Data) if tc.expectErr == "" { diff --git a/internal/mesh/internal/types/tcp_route_test.go b/internal/mesh/internal/types/tcp_route_test.go index 0118fc010bd8..2619a06a3ac2 100644 --- a/internal/mesh/internal/types/tcp_route_test.go +++ b/internal/mesh/internal/types/tcp_route_test.go @@ -29,7 +29,7 @@ func TestValidateTCPRoute(t *testing.T) { err := ValidateTCPRoute(res) // Verify that validate didn't actually change the object. - got := resourcetest.MustDecode[pbmesh.TCPRoute, *pbmesh.TCPRoute](t, res) + got := resourcetest.MustDecode[*pbmesh.TCPRoute](t, res) prototest.AssertDeepEqual(t, tc.route, got.Data) if tc.expectErr == "" { From 8f47382c83ea95cb2d052baa57b23069c8d3681b Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 22 Aug 2023 10:10:37 -0500 Subject: [PATCH 13/16] Update internal/mesh/internal/types/http_route.go Co-authored-by: Matt Keeler --- internal/mesh/internal/types/http_route.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 4ac846bc8161..9c2ad58d0e5c 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -236,7 +236,7 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { } if set != 1 { merr = multierror.Append(merr, wrapFilterErr( - errors.New("exactly one of request_header_modifier, response_header_modifier, or url_rewrite"), + errors.New("exactly one of request_header_modifier, response_header_modifier, or url_rewrite is required"), )) } } From a7f7c80e6c392fcb96901769ba49f2e6ac05d547 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Tue, 22 Aug 2023 10:29:51 -0500 Subject: [PATCH 14/16] validate dest policy enums --- .../mesh/internal/types/destination_policy.go | 62 ++++++++- .../internal/types/destination_policy_test.go | 119 ++++++++++++++++-- 2 files changed, 166 insertions(+), 15 deletions(-) diff --git a/internal/mesh/internal/types/destination_policy.go b/internal/mesh/internal/types/destination_policy.go index ca41a7fbc438..1c1cbd57d2e1 100644 --- a/internal/mesh/internal/types/destination_policy.go +++ b/internal/mesh/internal/types/destination_policy.go @@ -77,13 +77,27 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { if pc.LoadBalancer != nil { lb := pc.LoadBalancer wrapLBErr := func(err error) error { - return wrapErr(resource.ErrInvalidMapValue{ - Map: "load_balancer", - Key: port, + return wrapErr(resource.ErrInvalidField{ + Name: "load_balancer", Wrapped: err, }) } + switch lb.Policy { + case pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_UNSPECIFIED: + // means just do the default + case pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RANDOM: + case pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_ROUND_ROBIN: + case pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_LEAST_REQUEST: + case pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV: + case pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH: + default: + merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ + Name: "policy", + Wrapped: fmt.Errorf("not a supported enum value: %v", lb.Policy), + })) + } + if lb.Policy != pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_RING_HASH && lb.Config != nil { if _, ok := lb.Config.(*pbmesh.LoadBalancer_RingHashConfig); ok { merr = multierror.Append(merr, wrapLBErr(resource.ErrInvalidField{ @@ -109,6 +123,7 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { })) } + LOOP: for i, hp := range lb.HashPolicies { wrapHPErr := func(err error) error { return wrapLBErr(resource.ErrInvalidListElement{ @@ -118,7 +133,20 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { }) } - hasField := (hp.Field != pbmesh.HashPolicyField_HASH_POLICY_FIELD_UNSPECIFIED) + var hasField bool + switch hp.Field { + case pbmesh.HashPolicyField_HASH_POLICY_FIELD_UNSPECIFIED: + case pbmesh.HashPolicyField_HASH_POLICY_FIELD_HEADER, + pbmesh.HashPolicyField_HASH_POLICY_FIELD_COOKIE, + pbmesh.HashPolicyField_HASH_POLICY_FIELD_QUERY_PARAMETER: + hasField = true + default: + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "field", + Wrapped: fmt.Errorf("not a supported enum value: %v", hp.Field), + })) + continue LOOP // no need to keep validating + } if hp.SourceIp { if hasField { @@ -142,6 +170,10 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { })) } if hp.FieldValue != "" && !hasField { + merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ + Name: "field", + Wrapped: resource.ErrMissing, + })) merr = multierror.Append(merr, wrapHPErr(resource.ErrInvalidField{ Name: "field_value", Wrapped: errors.New("requires a field to apply to"), @@ -166,6 +198,28 @@ func ValidateDestinationPolicy(res *pbresource.Resource) error { } } } + + if pc.LocalityPrioritization != nil { + lp := pc.LocalityPrioritization + wrapLPErr := func(err error) error { + return wrapErr(resource.ErrInvalidField{ + Name: "locality_prioritization", + Wrapped: err, + }) + } + + switch lp.Mode { + case pbmesh.LocalityPrioritizationMode_LOCALITY_PRIORITIZATION_MODE_UNSPECIFIED: + // means 
pbmesh.LocalityPrioritizationMode_LOCALITY_PRIORITIZATION_MODE_NONE + case pbmesh.LocalityPrioritizationMode_LOCALITY_PRIORITIZATION_MODE_NONE: + case pbmesh.LocalityPrioritizationMode_LOCALITY_PRIORITIZATION_MODE_FAILOVER: + default: + merr = multierror.Append(merr, wrapLPErr(resource.ErrInvalidField{ + Name: "mode", + Wrapped: fmt.Errorf("not a supported enum value: %v", lp.Mode), + })) + } + } } return merr diff --git a/internal/mesh/internal/types/destination_policy_test.go b/internal/mesh/internal/types/destination_policy_test.go index 4276c450fdad..960c42aedea2 100644 --- a/internal/mesh/internal/types/destination_policy_test.go +++ b/internal/mesh/internal/types/destination_policy_test.go @@ -94,6 +94,29 @@ func TestValidateDestinationPolicy(t *testing.T) { expectErr: `invalid value of key "http" within port_configs: invalid "request_timeout" field: '-55s', must be >= 0`, }, // load balancer + "lbpolicy: missing enum": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{}, + }, + }, + }, + }, + "lbpolicy: bad enum": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: 99, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid "policy" field: not a supported enum value: 99`, + }, "lbpolicy: supported": { policy: &pbmesh.DestinationPolicy{ PortConfigs: map[string]*pbmesh.DestinationConfig{ @@ -122,7 +145,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: least_request_config specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_RING_HASH"`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid "config" field: least_request_config specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_RING_HASH"`, }, "lbpolicy: bad for ring hash config": { policy: &pbmesh.DestinationPolicy{ @@ -140,7 +163,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "config" field: ring_hash_config specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_LEAST_REQUEST"`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid "config" field: ring_hash_config specified for incompatible load balancing policy "LOAD_BALANCER_POLICY_LEAST_REQUEST"`, }, "lbpolicy: good for least request config": { policy: &pbmesh.DestinationPolicy{ @@ -189,7 +212,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid "hash_policies" field: hash_policies specified for non-hash-based policy "LOAD_BALANCER_POLICY_UNSPECIFIED"`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid "hash_policies" field: hash_policies specified for non-hash-based policy "LOAD_BALANCER_POLICY_UNSPECIFIED"`, }, "lbconfig: cookie config with header policy": { policy: &pbmesh.DestinationPolicy{ @@ -212,7 +235,7 @@ 
func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "cookie_config" field: incompatible with field "HASH_POLICY_FIELD_HEADER"`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "cookie_config" field: incompatible with field "HASH_POLICY_FIELD_HEADER"`, }, "lbconfig: cannot generate session cookie with ttl": { policy: &pbmesh.DestinationPolicy{ @@ -235,7 +258,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "cookie_config" field: invalid "ttl" field: a session cookie cannot have an associated TTL`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "cookie_config" field: invalid "ttl" field: a session cookie cannot have an associated TTL`, }, "lbconfig: valid cookie policy": { policy: &pbmesh.DestinationPolicy{ @@ -259,6 +282,25 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, + "lbconfig: bad match field": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + Field: 99, + FieldValue: "X-Consul-Token", + }, + }, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field" field: not a supported enum value: 99`, + }, "lbconfig: supported match field": { policy: &pbmesh.DestinationPolicy{ PortConfigs: map[string]*pbmesh.DestinationConfig{ @@ -277,6 +319,27 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, + "lbconfig: missing match field": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + LoadBalancer: &pbmesh.LoadBalancer{ + Policy: pbmesh.LoadBalancerPolicy_LOAD_BALANCER_POLICY_MAGLEV, + HashPolicies: []*pbmesh.HashPolicy{ + { + FieldValue: "X-Consul-Token", + }, + }, + }, + }, + }, + }, + expectErrs: []string{ + `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field" field: missing required field`, + `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, + }, + }, "lbconfig: cannot match on source address and custom field": { policy: &pbmesh.DestinationPolicy{ PortConfigs: map[string]*pbmesh.DestinationConfig{ @@ -295,8 +358,8 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, expectErrs: []string{ - `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field" field: a single hash policy cannot hash both a source address and a "HASH_POLICY_FIELD_HEADER"`, - `invalid value of key "http" 
within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: field "HASH_POLICY_FIELD_HEADER" was specified without a field_value`, + `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field" field: a single hash policy cannot hash both a source address and a "HASH_POLICY_FIELD_HEADER"`, + `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field_value" field: field "HASH_POLICY_FIELD_HEADER" was specified without a field_value`, }, }, "lbconfig: matchvalue not compatible with source address": { @@ -317,8 +380,8 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, expectErrs: []string{ - `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: cannot be specified when hashing source_ip`, - `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, + `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field_value" field: cannot be specified when hashing source_ip`, + `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, }, }, "lbconfig: field without match value": { @@ -337,7 +400,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: field "HASH_POLICY_FIELD_HEADER" was specified without a field_value`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field_value" field: field "HASH_POLICY_FIELD_HEADER" was specified without a field_value`, }, "lbconfig: matchvalue without field": { policy: &pbmesh.DestinationPolicy{ @@ -355,7 +418,7 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, - expectErr: `invalid value of key "http" within port_configs: invalid value of key "http" within load_balancer: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, + expectErr: `invalid value of key "http" within port_configs: invalid "load_balancer" field: invalid element at index 0 of list "hash_policies": invalid "field_value" field: requires a field to apply to`, }, "lbconfig: ring hash kitchen sink": { policy: &pbmesh.DestinationPolicy{ @@ -403,6 +466,40 @@ func TestValidateDestinationPolicy(t *testing.T) { }, }, }, + "locality: good mode": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + LocalityPrioritization: &pbmesh.LocalityPrioritization{ + Mode: pbmesh.LocalityPrioritizationMode_LOCALITY_PRIORITIZATION_MODE_FAILOVER, + }, + }, + }, + }, + }, + "locality: unset mode": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + LocalityPrioritization: 
&pbmesh.LocalityPrioritization{ + Mode: pbmesh.LocalityPrioritizationMode_LOCALITY_PRIORITIZATION_MODE_UNSPECIFIED, + }, + }, + }, + }, + }, + "locality: bad mode": { + policy: &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + LocalityPrioritization: &pbmesh.LocalityPrioritization{ + Mode: 99, + }, + }, + }, + }, + expectErr: `invalid value of key "http" within port_configs: invalid "locality_prioritization" field: invalid "mode" field: not a supported enum value: 99`, + }, } for name, tc := range cases { From 7e61a8eeaa0d48a64cc5991b3fa498d9147859df Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Tue, 22 Aug 2023 10:57:55 -0500 Subject: [PATCH 15/16] validate xroute enums --- internal/mesh/internal/types/grpc_route.go | 22 +- .../mesh/internal/types/grpc_route_test.go | 38 +++ internal/mesh/internal/types/http_route.go | 284 ++---------------- .../mesh/internal/types/http_route_test.go | 57 ++++ internal/mesh/internal/types/xroute.go | 276 +++++++++++++++++ 5 files changed, 413 insertions(+), 264 deletions(-) diff --git a/internal/mesh/internal/types/grpc_route.go b/internal/mesh/internal/types/grpc_route.go index 152028827694..a159ee3afa30 100644 --- a/internal/mesh/internal/types/grpc_route.go +++ b/internal/mesh/internal/types/grpc_route.go @@ -5,6 +5,7 @@ package types import ( "errors" + "fmt" "github.com/hashicorp/go-multierror" @@ -74,13 +75,23 @@ func ValidateGRPCRoute(res *pbresource.Resource) error { } if match.Method != nil { - if match.Method.Type == pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED { + switch match.Method.Type { + case pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED: merr = multierror.Append(merr, wrapMatchErr( resource.ErrInvalidField{ Name: "type", Wrapped: resource.ErrMissing, }, )) + case pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT: + case pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_REGEX: + default: + merr = multierror.Append(merr, wrapMatchErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: fmt.Errorf("not a supported enum value: %v", match.Method.Type), + }, + )) } if match.Method.Service == "" && match.Method.Method == "" { merr = multierror.Append(merr, wrapMatchErr( @@ -101,14 +112,15 @@ func ValidateGRPCRoute(res *pbresource.Resource) error { }) } - if header.Type == pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED { + if err := validateHeaderMatchType(header.Type); err != nil { merr = multierror.Append(merr, wrapMatchHeaderErr( resource.ErrInvalidField{ Name: "type", - Wrapped: resource.ErrMissing, - }, - )) + Wrapped: err, + }), + ) } + if header.Name == "" { merr = multierror.Append(merr, wrapMatchHeaderErr( resource.ErrInvalidField{ diff --git a/internal/mesh/internal/types/grpc_route_test.go b/internal/mesh/internal/types/grpc_route_test.go index 3a7f1f385122..e9abc118c654 100644 --- a/internal/mesh/internal/types/grpc_route_test.go +++ b/internal/mesh/internal/types/grpc_route_test.go @@ -101,6 +101,25 @@ func TestValidateGRPCRoute(t *testing.T) { }, expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "type" field: missing required field`, }, + "method match with unknown type is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Method: &pbmesh.GRPCMethodMatch{ + Type: 99, + Service: "foo", + }, + }}, + BackendRefs: 
[]*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "type" field: not a supported enum value: 99`, + }, "method match with no service nor method is bad": { route: &pbmesh.GRPCRoute{ ParentRefs: []*pbmesh.ParentReference{ @@ -192,6 +211,25 @@ func TestValidateGRPCRoute(t *testing.T) { }, expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "headers": invalid "type" field: missing required field`, }, + "header match with unknown type is bad": { + route: &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.GRPCRouteRule{{ + Matches: []*pbmesh.GRPCRouteMatch{{ + Headers: []*pbmesh.GRPCHeaderMatch{{ + Type: 99, + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.GRPCBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "headers": invalid "type" field: not a supported enum value: 99`, + }, "header match with no name is bad": { route: &pbmesh.GRPCRoute{ ParentRefs: []*pbmesh.ParentReference{ diff --git a/internal/mesh/internal/types/http_route.go b/internal/mesh/internal/types/http_route.go index 9c2ad58d0e5c..d348091a7e92 100644 --- a/internal/mesh/internal/types/http_route.go +++ b/internal/mesh/internal/types/http_route.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/go-multierror" - "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -140,6 +139,13 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { }, )) } + default: + merr = multierror.Append(merr, wrapMatchPathErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: fmt.Errorf("not a supported enum value: %v", match.Path.Type), + }, + )) } } @@ -151,14 +157,16 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { Wrapped: err, }) } - if hdr.Type == pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED { + + if err := validateHeaderMatchType(hdr.Type); err != nil { merr = multierror.Append(merr, wrapMatchHeaderErr( resource.ErrInvalidField{ Name: "type", - Wrapped: resource.ErrMissing, + Wrapped: err, }), ) } + if hdr.Name == "" { merr = multierror.Append(merr, wrapMatchHeaderErr( resource.ErrInvalidField{ @@ -177,14 +185,27 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { Wrapped: err, }) } - if qm.Type == pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_UNSPECIFIED { + + switch qm.Type { + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_UNSPECIFIED: merr = multierror.Append(merr, wrapMatchParamErr( resource.ErrInvalidField{ Name: "type", Wrapped: resource.ErrMissing, }), ) + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT: + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_REGEX: + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT: + default: + merr = multierror.Append(merr, wrapMatchParamErr( + resource.ErrInvalidField{ + Name: "type", + Wrapped: fmt.Errorf("not a supported enum value: %v", qm.Type), + }, + )) } + if qm.Name == "" { merr = multierror.Append(merr, wrapMatchParamErr( resource.ErrInvalidField{ @@ -310,241 +331,6 @@ func ValidateHTTPRoute(res *pbresource.Resource) error { return merr } 
-func validateHTTPTimeouts(timeouts *pbmesh.HTTPRouteTimeouts) []error { - if timeouts == nil { - return nil - } - - var errs []error - - if timeouts.Request != nil { - val := timeouts.Request.AsDuration() - if val < 0 { - errs = append(errs, resource.ErrInvalidField{ - Name: "request", - Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), - }) - } - } - if timeouts.BackendRequest != nil { - val := timeouts.BackendRequest.AsDuration() - if val < 0 { - errs = append(errs, resource.ErrInvalidField{ - Name: "backend_request", - Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), - }) - } - } - if timeouts.Idle != nil { - val := timeouts.Idle.AsDuration() - if val < 0 { - errs = append(errs, resource.ErrInvalidField{ - Name: "idle", - Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), - }) - } - } - - return errs -} - -func validateHTTPRetries(retries *pbmesh.HTTPRouteRetries) []error { - if retries == nil { - return nil - } - - var errs []error - - if retries.Number < 0 { - errs = append(errs, resource.ErrInvalidField{ - Name: "number", - Wrapped: fmt.Errorf("cannot be negative: %v", retries.Number), - }) - } - - for i, condition := range retries.OnConditions { - if !isValidRetryCondition(condition) { - errs = append(errs, resource.ErrInvalidListElement{ - Name: "on_conditions", - Index: i, - Wrapped: fmt.Errorf("not a valid retry condition: %q", condition), - }) - } - } - - return errs -} - -func validateBackendRef(backendRef *pbmesh.BackendReference) []error { - var errs []error - if backendRef == nil { - errs = append(errs, resource.ErrMissing) - - } else if backendRef.Ref == nil { - errs = append(errs, resource.ErrInvalidField{ - Name: "ref", - Wrapped: resource.ErrMissing, - }) - - } else { - if !IsServiceType(backendRef.Ref.Type) { - errs = append(errs, resource.ErrInvalidField{ - Name: "ref", - Wrapped: resource.ErrInvalidReferenceType{ - AllowedType: catalog.ServiceType, - }, - }) - } - - if backendRef.Ref.Section != "" { - errs = append(errs, resource.ErrInvalidField{ - Name: "ref", - Wrapped: resource.ErrInvalidField{ - Name: "section", - Wrapped: errors.New("section not supported for service backend refs"), - }, - }) - } - - if backendRef.Datacenter != "" { - errs = append(errs, resource.ErrInvalidField{ - Name: "datacenter", - Wrapped: errors.New("datacenter is not yet supported on backend refs"), - }) - } - } - return errs -} - -type portedRefKey struct { - Key resource.ReferenceKey - Port string -} - -func validateParentRefs(parentRefs []*pbmesh.ParentReference) error { - var merr error - if len(parentRefs) == 0 { - merr = multierror.Append(merr, resource.ErrInvalidField{ - Name: "parent_refs", - Wrapped: resource.ErrEmpty, - }) - } - - var ( - seen = make(map[portedRefKey]struct{}) - seenAny = make(map[resource.ReferenceKey][]string) - ) - for i, parent := range parentRefs { - wrapErr := func(err error) error { - return resource.ErrInvalidListElement{ - Name: "parent_refs", - Index: i, - Wrapped: err, - } - } - if parent.Ref == nil { - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: resource.ErrMissing, - }, - )) - } else { - if !IsServiceType(parent.Ref.Type) { - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: resource.ErrInvalidReferenceType{ - AllowedType: catalog.ServiceType, - }, - }, - )) - } - if parent.Ref.Section != "" { - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: resource.ErrInvalidField{ - Name: 
"section", - Wrapped: errors.New("section not supported for service parent refs"), - }, - }, - )) - } - - prk := portedRefKey{ - Key: resource.NewReferenceKey(parent.Ref), - Port: parent.Port, - } - - _, portExist := seen[prk] - - if parent.Port == "" { - coveredPorts, exactExists := seenAny[prk.Key] - - if portExist { // check for duplicate wild - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: fmt.Errorf( - "parent ref %q for wildcard port exists twice", - resource.ReferenceToString(parent.Ref), - ), - }, - )) - } else if exactExists { // check for existing exact - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: fmt.Errorf( - "parent ref %q for ports %v covered by wildcard port already", - resource.ReferenceToString(parent.Ref), - coveredPorts, - ), - }, - )) - } else { - seen[prk] = struct{}{} - } - - } else { - prkWild := prk - prkWild.Port = "" - _, wildExist := seen[prkWild] - - if portExist { // check for duplicate exact - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: fmt.Errorf( - "parent ref %q for port %q exists twice", - resource.ReferenceToString(parent.Ref), - parent.Port, - ), - }, - )) - } else if wildExist { // check for existing wild - merr = multierror.Append(merr, wrapErr( - resource.ErrInvalidField{ - Name: "ref", - Wrapped: fmt.Errorf( - "parent ref %q for port %q covered by wildcard port already", - resource.ReferenceToString(parent.Ref), - parent.Port, - ), - }, - )) - } else { - seen[prk] = struct{}{} - seenAny[prk.Key] = append(seenAny[prk.Key], parent.Port) - } - } - } - } - - return merr -} - func isValidHTTPMethod(method string) bool { switch method { case http.MethodGet, @@ -561,23 +347,3 @@ func isValidHTTPMethod(method string) bool { return false } } - -func isValidRetryCondition(retryOn string) bool { - switch retryOn { - case "5xx", - "gateway-error", - "reset", - "connect-failure", - "envoy-ratelimited", - "retriable-4xx", - "refused-stream", - "cancelled", - "deadline-exceeded", - "internal", - "resource-exhausted", - "unavailable": - return true - default: - return false - } -} diff --git a/internal/mesh/internal/types/http_route_test.go b/internal/mesh/internal/types/http_route_test.go index 9bfa4bce9392..0f4626aa7094 100644 --- a/internal/mesh/internal/types/http_route_test.go +++ b/internal/mesh/internal/types/http_route_test.go @@ -233,6 +233,25 @@ func TestValidateHTTPRoute(t *testing.T) { }, expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "path" field: invalid "type" field: missing required field`, }, + "path match with unknown type is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Path: &pbmesh.HTTPPathMatch{ + Type: 99, + Value: "/foo", + }, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid "path" field: invalid "type" field: not a supported enum value: 99`, + }, "exact path match with no leading slash is bad": { route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ @@ -325,6 +344,25 @@ func TestValidateHTTPRoute(t *testing.T) { }, expectErr: `invalid element at index 0 of list "rules": invalid 
element at index 0 of list "matches": invalid element at index 0 of list "headers": invalid "type" field: missing required field`, }, + "header match with unknown type is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + Headers: []*pbmesh.HTTPHeaderMatch{{ + Type: 99, + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "headers": invalid "type" field: not a supported enum value: 99`, + }, "header match with no name is bad": { route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ @@ -379,6 +417,25 @@ func TestValidateHTTPRoute(t *testing.T) { }, expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "query_params": invalid "type" field: missing required field`, }, + "queryparam match with unknown type is bad": { + route: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + newParentRef(catalog.ServiceType, "web", ""), + }, + Rules: []*pbmesh.HTTPRouteRule{{ + Matches: []*pbmesh.HTTPRouteMatch{{ + QueryParams: []*pbmesh.HTTPQueryParamMatch{{ + Type: 99, + Name: "x-foo", + }}, + }}, + BackendRefs: []*pbmesh.HTTPBackendRef{{ + BackendRef: newBackendRef(catalog.ServiceType, "api", ""), + }}, + }}, + }, + expectErr: `invalid element at index 0 of list "rules": invalid element at index 0 of list "matches": invalid element at index 0 of list "query_params": invalid "type" field: not a supported enum value: 99`, + }, "queryparam match with no name is bad": { route: &pbmesh.HTTPRoute{ ParentRefs: []*pbmesh.ParentReference{ diff --git a/internal/mesh/internal/types/xroute.go b/internal/mesh/internal/types/xroute.go index b7b190269623..572e7f53138b 100644 --- a/internal/mesh/internal/types/xroute.go +++ b/internal/mesh/internal/types/xroute.go @@ -4,8 +4,14 @@ package types import ( + "errors" + "fmt" + + "github.com/hashicorp/go-multierror" "google.golang.org/protobuf/proto" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" ) @@ -18,3 +24,273 @@ type XRouteWithRefs interface { GetParentRefs() []*pbmesh.ParentReference GetUnderlyingBackendRefs() []*pbmesh.BackendReference } + +type portedRefKey struct { + Key resource.ReferenceKey + Port string +} + +func validateParentRefs(parentRefs []*pbmesh.ParentReference) error { + var merr error + if len(parentRefs) == 0 { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "parent_refs", + Wrapped: resource.ErrEmpty, + }) + } + + var ( + seen = make(map[portedRefKey]struct{}) + seenAny = make(map[resource.ReferenceKey][]string) + ) + for i, parent := range parentRefs { + wrapErr := func(err error) error { + return resource.ErrInvalidListElement{ + Name: "parent_refs", + Index: i, + Wrapped: err, + } + } + if parent.Ref == nil { + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrMissing, + }, + )) + } else { + if !IsServiceType(parent.Ref.Type) { + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidReferenceType{ + AllowedType: 
catalog.ServiceType, + }, + }, + )) + } + if parent.Ref.Section != "" { + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidField{ + Name: "section", + Wrapped: errors.New("section not supported for service parent refs"), + }, + }, + )) + } + + prk := portedRefKey{ + Key: resource.NewReferenceKey(parent.Ref), + Port: parent.Port, + } + + _, portExist := seen[prk] + + if parent.Port == "" { + coveredPorts, exactExists := seenAny[prk.Key] + + if portExist { // check for duplicate wild + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for wildcard port exists twice", + resource.ReferenceToString(parent.Ref), + ), + }, + )) + } else if exactExists { // check for existing exact + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for ports %v covered by wildcard port already", + resource.ReferenceToString(parent.Ref), + coveredPorts, + ), + }, + )) + } else { + seen[prk] = struct{}{} + } + + } else { + prkWild := prk + prkWild.Port = "" + _, wildExist := seen[prkWild] + + if portExist { // check for duplicate exact + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for port %q exists twice", + resource.ReferenceToString(parent.Ref), + parent.Port, + ), + }, + )) + } else if wildExist { // check for existing wild + merr = multierror.Append(merr, wrapErr( + resource.ErrInvalidField{ + Name: "ref", + Wrapped: fmt.Errorf( + "parent ref %q for port %q covered by wildcard port already", + resource.ReferenceToString(parent.Ref), + parent.Port, + ), + }, + )) + } else { + seen[prk] = struct{}{} + seenAny[prk.Key] = append(seenAny[prk.Key], parent.Port) + } + } + } + } + + return merr +} + +func validateBackendRef(backendRef *pbmesh.BackendReference) []error { + var errs []error + if backendRef == nil { + errs = append(errs, resource.ErrMissing) + + } else if backendRef.Ref == nil { + errs = append(errs, resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrMissing, + }) + + } else { + if !IsServiceType(backendRef.Ref.Type) { + errs = append(errs, resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidReferenceType{ + AllowedType: catalog.ServiceType, + }, + }) + } + + if backendRef.Ref.Section != "" { + errs = append(errs, resource.ErrInvalidField{ + Name: "ref", + Wrapped: resource.ErrInvalidField{ + Name: "section", + Wrapped: errors.New("section not supported for service backend refs"), + }, + }) + } + + if backendRef.Datacenter != "" { + errs = append(errs, resource.ErrInvalidField{ + Name: "datacenter", + Wrapped: errors.New("datacenter is not yet supported on backend refs"), + }) + } + } + return errs +} + +func validateHeaderMatchType(typ pbmesh.HeaderMatchType) error { + switch typ { + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED: + return resource.ErrMissing + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_REGEX: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PRESENT: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_SUFFIX: + default: + return fmt.Errorf("not a supported enum value: %v", typ) + } + return nil +} + +func validateHTTPTimeouts(timeouts *pbmesh.HTTPRouteTimeouts) []error { + if timeouts == nil { + return nil + } + + var errs []error + + if timeouts.Request 
!= nil { + val := timeouts.Request.AsDuration() + if val < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "request", + Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), + }) + } + } + if timeouts.BackendRequest != nil { + val := timeouts.BackendRequest.AsDuration() + if val < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "backend_request", + Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), + }) + } + } + if timeouts.Idle != nil { + val := timeouts.Idle.AsDuration() + if val < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "idle", + Wrapped: fmt.Errorf("timeout cannot be negative: %v", val), + }) + } + } + + return errs +} + +func validateHTTPRetries(retries *pbmesh.HTTPRouteRetries) []error { + if retries == nil { + return nil + } + + var errs []error + + if retries.Number < 0 { + errs = append(errs, resource.ErrInvalidField{ + Name: "number", + Wrapped: fmt.Errorf("cannot be negative: %v", retries.Number), + }) + } + + for i, condition := range retries.OnConditions { + if !isValidRetryCondition(condition) { + errs = append(errs, resource.ErrInvalidListElement{ + Name: "on_conditions", + Index: i, + Wrapped: fmt.Errorf("not a valid retry condition: %q", condition), + }) + } + } + + return errs +} + +func isValidRetryCondition(retryOn string) bool { + switch retryOn { + case "5xx", + "gateway-error", + "reset", + "connect-failure", + "envoy-ratelimited", + "retriable-4xx", + "refused-stream", + "cancelled", + "deadline-exceeded", + "internal", + "resource-exhausted", + "unavailable": + return true + default: + return false + } +} From 160cfc68960ae8f7e3aa2278a9b6f8d2e4c16a0e Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 22 Aug 2023 11:02:55 -0500 Subject: [PATCH 16/16] Update internal/mesh/internal/types/grpc_route.go Co-authored-by: Matt Keeler --- internal/mesh/internal/types/grpc_route.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/mesh/internal/types/grpc_route.go b/internal/mesh/internal/types/grpc_route.go index a159ee3afa30..0d6c8df07f4e 100644 --- a/internal/mesh/internal/types/grpc_route.go +++ b/internal/mesh/internal/types/grpc_route.go @@ -163,7 +163,7 @@ func ValidateGRPCRoute(res *pbresource.Resource) error { } if set != 1 { merr = multierror.Append(merr, wrapFilterErr( - errors.New("exactly one of request_header_modifier, response_header_modifier, or url_rewrite"), + errors.New("exactly one of request_header_modifier, response_header_modifier, or url_rewrite is required"), )) } }
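
The enum validation added in PATCH 14 and PATCH 15 follows one pattern throughout: treat the zero UNSPECIFIED value as a missing or default field, list each supported value as its own (often empty) case, and reject any other number with "not a supported enum value". A minimal, self-contained Go sketch of that pattern is below; it uses a hypothetical HeaderMatchKind enum and plain errors rather than the generated pbmesh types and resource error wrappers, so it illustrates the shape of the checks, not the actual Consul API.

package main

import (
	"errors"
	"fmt"
)

// HeaderMatchKind is a stand-in for a protobuf-generated enum: the zero
// value means "unspecified", and unknown numeric values can appear when
// decoding data written by a newer schema.
type HeaderMatchKind int32

const (
	HeaderMatchKindUnspecified HeaderMatchKind = 0
	HeaderMatchKindExact       HeaderMatchKind = 1
	HeaderMatchKindRegex       HeaderMatchKind = 2
)

// validateHeaderMatchKind mirrors the pattern used in the patches above:
// zero is reported as a missing required field, each known value passes,
// and any other number is rejected explicitly.
func validateHeaderMatchKind(kind HeaderMatchKind) error {
	switch kind {
	case HeaderMatchKindUnspecified:
		return errors.New("missing required field")
	case HeaderMatchKindExact, HeaderMatchKindRegex:
		return nil
	default:
		return fmt.Errorf("not a supported enum value: %v", kind)
	}
}

func main() {
	fmt.Println(validateHeaderMatchKind(HeaderMatchKindExact)) // <nil>
	fmt.Println(validateHeaderMatchKind(99))                   // not a supported enum value: 99
	fmt.Println(validateHeaderMatchKind(0))                    // missing required field
}

Listing the known values as empty cases, instead of folding them into the default branch, keeps the switch exhaustive: when a new enum value is added later, the validator has to be revisited deliberately rather than silently accepting or rejecting it.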